Dataset schema (field, type, observed range):

  repo_name        string   (6 distinct values)
  pr_number        int64    (99 to 20.3k)
  pr_title         string   (length 8 to 158)
  pr_description   string   (length 0 to 6.54k)
  author           string   (length 4 to 18)
  date_created     unknown
  date_merged      unknown
  previous_commit  string   (length 40)
  pr_commit        string   (length 40)
  query            string   (length 37 to 6.57k)
  filepath         string   (length 8 to 153)
  before_content   string   (length 0 to 876M)
  after_content    string   (length 0 to 876M)
  label            int64    (-1 to 1)
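Assuming this dump comes from a dataset hosted in the Hugging Face hub format (the schema above matches its viewer's feature listing), the rows below can be read with the `datasets` library. A minimal sketch follows; the dataset identifier is a placeholder, since the real id is not recorded in this dump, and the label semantics are inferred from the rows, not documented.

```python
# Minimal sketch, assuming the `datasets` library; the hub id is hypothetical.
from datasets import load_dataset

ds = load_dataset("org-name/pr-file-dataset", split="train")  # placeholder id
row = ds[0]
print(row["repo_name"], row["pr_number"], row["filepath"])

# In the rows below, label is -1 exactly where before_content equals
# after_content, which suggests -1 marks files the PR left unchanged
# (an inference from this dump, not a documented semantic).
unchanged = [r for r in ds if r["label"] == -1]
```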
Row 1:
repo_name: tensorflow/graphics
pr_number: 489
pr_title: Enforce `Framebuffer` to accept only tensors with single batch dimension.
pr_description: Enforce `Framebuffer` to accept only tensors with single batch dimension.
author: copybara-service[bot]
date_created: "2021-02-03T21:06:22Z"
date_merged: "2021-02-12T23:59:48Z"
previous_commit: 3a4f1952ed967fb884dc031eeda6dac3fbefbe52
pr_commit: b7a2bf260d6fcf924fddcbb6dba36c72ece66990
query: Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
filepath: ./tensorflow_graphics/notebooks/mesh_segmentation_dataio.py
before_content:

```python
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset Pipeline for mesh_segmentation_demo.ipynb.

The shorthands used in parameter descriptions below are
  'B': Batch size.
  'E': Number of unique directed edges in a mesh.
  'V': Number of vertices in a mesh.
  'T': Number of triangles in a mesh.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow_graphics.geometry.convolution import utils as conv_utils
from tensorflow_graphics.geometry.representation.mesh import utils as mesh_utils
from tensorflow_graphics.util import shape

DEFAULT_IO_PARAMS = {
    'batch_size': 8,
    'shuffle_buffer_size': 100,
    'is_training': True,
    'parallel_threads': 5,
    'mean_center': True,
    'shuffle': None,
    'repeat': None,
}


def adjacency_from_edges(edges, weights, num_edges, num_vertices):
  """Returns a batched sparse 1-ring adjacency tensor from an edge list tensor.

  Args:
    edges: [B, E, 2] `int32` tensor of edges, possibly 0-padded.
    weights: [B, E] `float32` tensor of edge weights, possibly 0-padded.
    num_edges: [B] `int32` tensor of number of valid edges per batch sample.
    num_vertices: [B] `int32` tensor of number of valid vertices per batch
      sample.

  Returns:
    adj: A batched SparseTensor of the weighted adjacency graph, of
      dense_shape [B, V, V] where V is max(num_vertices).
  """
  edges = tf.convert_to_tensor(value=edges)
  weights = tf.convert_to_tensor(value=weights)
  num_edges = tf.convert_to_tensor(value=num_edges)
  num_vertices = tf.convert_to_tensor(value=num_vertices)

  if not edges.dtype.is_integer:
    raise TypeError("'edges' must have an integer type.")
  if not num_edges.dtype.is_integer:
    raise TypeError("'num_edges' must have an integer type.")
  if not num_vertices.dtype.is_integer:
    raise TypeError("'num_vertices' must have an integer type.")
  if not weights.dtype.is_floating:
    raise TypeError("'weights' must have a floating type.")

  shape.check_static(tensor=edges, tensor_name='edges', has_rank=3)
  shape.check_static(tensor=weights, tensor_name='weights', has_rank=2)
  shape.check_static(tensor=num_edges, tensor_name='num_edges', has_rank=1)
  shape.check_static(
      tensor=num_vertices, tensor_name='num_vertices', has_rank=1)
  shape.compare_dimensions(
      tensors=(edges, weights, num_edges, num_vertices),
      tensor_names=('edges', 'weights', 'num_edges', 'num_vertices'),
      axes=(-3, -2, -1, -1))
  shape.compare_dimensions(
      tensors=(edges, weights),
      tensor_names=('edges', 'weights'),
      axes=(-2, -1))

  batch_size = tf.shape(input=edges)[0]
  max_num_vertices = tf.reduce_max(input_tensor=num_vertices)
  max_num_edges = tf.shape(input=edges)[1]
  batch_col = tf.reshape(tf.range(batch_size, dtype=edges.dtype), [-1, 1, 1])
  batch_col = tf.tile(batch_col, [1, max_num_edges, 1])
  batch_edges = tf.concat([batch_col, edges], axis=-1)
  indices, _ = conv_utils.flatten_batch_to_2d(batch_edges, sizes=num_edges)
  values, _ = conv_utils.flatten_batch_to_2d(
      tf.expand_dims(weights, -1), sizes=num_edges)
  values = tf.squeeze(values)
  adjacency = tf.SparseTensor(
      indices=tf.cast(indices, tf.int64),
      values=values,
      dense_shape=[batch_size, max_num_vertices, max_num_vertices])
  adjacency = tf.sparse.reorder(adjacency)
  return adjacency


def get_weighted_edges(faces, self_edges=True):
  r"""Gets unique edges and degree weights from a triangular mesh.

  The shorthands used below are:
    `T`: The number of triangles in the mesh.
    `E`: The number of unique directed edges in the mesh.

  Args:
    faces: A [T, 3] `int32` numpy.ndarray of triangle vertex indices.
    self_edges: A `bool` flag. If true, then for every vertex 'i' an edge
      [i, i] is added to the edge list.

  Returns:
    edges: A [E, 2] `int32` numpy.ndarray of directed edges.
    weights: A [E] `float32` numpy.ndarray denoting edge weights.

  The degree of a vertex is the number of edges incident on the vertex,
  including any self-edges. The weight for an edge $w_{ij}$ connecting vertex
  $v_i$ and vertex $v_j$ is defined as,
  $$
  w_{ij} = 1.0 / degree(v_i)
  \sum_{j} w_{ij} = 1
  $$
  """
  edges = mesh_utils.extract_unique_edges_from_triangular_mesh(
      faces, directed_edges=True).astype(np.int32)
  if self_edges:
    vertices = np.expand_dims(np.unique(edges[:, 0]), axis=1)
    self_edges = np.concatenate((vertices, vertices), axis=1)
    edges = np.unique(np.concatenate((edges, self_edges), axis=0), axis=0)
  weights = mesh_utils.get_degree_based_edge_weights(edges, dtype=np.float32)
  return edges, weights


def _tfrecords_to_dataset(tfrecords,
                          parallel_threads,
                          shuffle,
                          repeat,
                          sloppy,
                          max_readers=16):
  """Creates a TFRecordsDataset that iterates over filenames in parallel.

  Args:
    tfrecords: A list of tf.data.TFRecords filenames.
    parallel_threads: The `int` number denoting number of parallel worker
      threads.
    shuffle: The `bool` flag denoting whether to shuffle the dataset.
    repeat: The `bool` flag denoting whether to repeat the dataset.
    sloppy: The `bool` flag denoting if elements are produced in deterministic
      order.
    max_readers: The `int` number denoting the maximum number of input
      tfrecords to interleave from in parallel.

  Returns:
    A tf.data.TFRecordDataset.
  """
  total_tfrecords = sum([len(tf.io.gfile.glob(f)) for f in tfrecords])
  num_readers = min(total_tfrecords, max_readers)
  dataset = tf.data.Dataset.list_files(tfrecords, shuffle=shuffle)
  if repeat:
    dataset = dataset.repeat()
  return dataset.apply(
      tf.data.experimental.parallel_interleave(
          tf.data.TFRecordDataset,
          num_readers,
          sloppy=sloppy,
          buffer_output_elements=parallel_threads,
          prefetch_input_elements=parallel_threads))


def _parse_tfex_proto(example_proto):
  """Parses the tfexample proto to a raw mesh_data dictionary.

  Args:
    example_proto: A tf.Example proto storing the encoded mesh data.

  Returns:
    A mesh data dictionary with the following fields:
      'num_vertices': The `int64` number of vertices in mesh.
      'num_triangles': The `int64` number of triangles in mesh.
      'vertices': A serialized tensor of vertex positions.
      'triangles': A serialized tensor of triangle vertex indices.
      'labels': A serialized tensor of per vertex class labels.
  """
  feature_description = {
      'num_vertices': tf.io.FixedLenFeature([], tf.int64, default_value=0),
      'num_triangles': tf.io.FixedLenFeature([], tf.int64, default_value=0),
      'vertices': tf.io.FixedLenFeature([], tf.string, default_value=''),
      'triangles': tf.io.FixedLenFeature([], tf.string, default_value=''),
      'labels': tf.io.FixedLenFeature([], tf.string, default_value=''),
  }
  return tf.io.parse_single_example(
      serialized=example_proto, features=feature_description)


def _parse_mesh_data(mesh_data, mean_center=True):
  """Parses a raw mesh_data dictionary read from tf examples.

  Args:
    mesh_data: A mesh data dictionary with serialized data tensors, as output
      from _parse_tfex_proto().
    mean_center: If true, centers the mesh vertices to mean(vertices).

  Returns:
    A mesh data dictionary with following fields:
      'num_vertices': The `int32` number of vertices in mesh.
      'num_triangles': The `int32` number of triangles in mesh.
      'num_edges': The `int32` number of unique directed edges in mesh.
      'vertices': A [V, 3] `float32` tensor of vertex positions.
      'triangles': A [T, 3] `int32` tensor of triangle vertex indices.
      'labels': A [V] `int32` tensor of per vertex class labels.
      'edges': A [E, 2] `int32` tensor of unique directed edges in mesh.
      'edge_weights': A [E] `float32` tensor of vertex degree based edge
        weights.
  """
  labels = tf.io.parse_tensor(mesh_data['labels'], tf.int32)
  vertices = tf.io.parse_tensor(mesh_data['vertices'], tf.float32)
  triangles = tf.io.parse_tensor(mesh_data['triangles'], tf.int32)
  if mean_center:
    vertices = vertices - tf.reduce_mean(
        input_tensor=vertices, axis=0, keepdims=True)
  edges, weights = tf.py_function(
      func=lambda t: get_weighted_edges(t.numpy()),
      inp=[triangles],
      Tout=[tf.int32, tf.float32])
  num_edges = tf.shape(input=edges)[0]
  num_vertices = tf.cast(mesh_data['num_vertices'], tf.int32)
  num_triangles = tf.cast(mesh_data['num_triangles'], tf.int32)
  mesh_data = dict(
      vertices=vertices,
      labels=labels,
      triangles=triangles,
      edges=edges,
      edge_weights=weights,
      num_triangles=num_triangles,
      num_vertices=num_vertices,
      num_edges=num_edges)
  return mesh_data


def create_dataset_from_tfrecords(tfrecords, params):
  """Creates a mesh dataset given a list of tf records filenames.

  Args:
    tfrecords: A list of TFRecords filenames.
    params: A dictionary of IO parameters, see DEFAULT_IO_PARAMS.

  Returns:
    A tf.data.Dataset, with each element a dictionary of batched mesh data
      with following fields:
      'vertices': A [B, V, 3] `float32` tensor of vertex positions, possibly
        0-padded.
      'triangles': A [B, T, 3] `int32` tensor of triangle vertex indices,
        possibly 0-padded.
      'labels': A [B, V] `int32` tensor of per vertex class labels, possibly
        0-padded.
      'edges': A [B, E, 2] `int32` tensor of unique directed edges in mesh,
        possibly 0-padded.
      'edge_weights': A [B, E] `float32` tensor of vertex degree based edge
        weights, possibly 0-padded.
      'num_edges': A [B] `int32` tensor of number of unique directed edges in
        each mesh in the batch.
      'num_vertices': A [B] `int32` tensor of number of vertices in each mesh
        in the batch.
      'num_triangles': A [B] `int32` tensor of number of triangles in each
        mesh in the batch.
  """

  def _set_default_if_none(param, param_dict, default_val):
    if param not in param_dict:
      return default_val
    else:
      return default_val if param_dict[param] is None else param_dict[param]

  is_training = params['is_training']
  shuffle = _set_default_if_none('shuffle', params, is_training)
  repeat = _set_default_if_none('repeat', params, is_training)
  sloppy = _set_default_if_none('sloppy', params, is_training)
  if not isinstance(tfrecords, list):
    tfrecords = [tfrecords]
  dataset = _tfrecords_to_dataset(tfrecords, params['parallel_threads'],
                                  shuffle, repeat, sloppy)
  dataset = dataset.map(_parse_tfex_proto, tf.data.experimental.AUTOTUNE)
  dataset = dataset.map(
      lambda x: _parse_mesh_data(x, mean_center=params['mean_center']),
      tf.data.experimental.AUTOTUNE)
  if repeat:
    dataset = dataset.repeat()
  if shuffle:
    dataset = dataset.shuffle(params['shuffle_buffer_size'])
  return dataset.padded_batch(
      params['batch_size'],
      padded_shapes={
          'vertices': [None, 3],
          'labels': [None],
          'triangles': [None, 3],
          'edges': [None, 2],
          'edge_weights': [None],
          'num_edges': [],
          'num_vertices': [],
          'num_triangles': [],
      },
      drop_remainder=is_training)


def create_input_from_dataset(dataset_fn, files, io_params):
  """Creates input function given dataset generator and input files.

  Args:
    dataset_fn: A dataset generator function.
    files: A list of TFRecords filenames.
    io_params: A dictionary of IO parameters, see DEFAULT_IO_PARAMS.

  Returns:
    features: A dictionary of mesh data training features.
    labels: A [B] `int32` tensor of per vertex class labels.
  """
  for k in DEFAULT_IO_PARAMS:
    io_params[k] = io_params[k] if k in io_params else DEFAULT_IO_PARAMS[k]

  dataset = dataset_fn(files, io_params)
  mesh_data = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
  mesh_data['neighbors'] = adjacency_from_edges(mesh_data['edges'],
                                                mesh_data['edge_weights'],
                                                mesh_data['num_edges'],
                                                mesh_data['num_vertices'])
  max_num_verts = tf.reduce_max(input_tensor=mesh_data['num_vertices'])
  features = dict(
      vertices=tf.reshape(mesh_data['vertices'], [-1, max_num_verts, 3]),
      triangles=mesh_data['triangles'],
      neighbors=mesh_data['neighbors'],
      num_triangles=mesh_data['num_triangles'],
      num_vertices=mesh_data['num_vertices'])
  labels = mesh_data['labels']
  # Copy labels to features dictionary for estimator prediction mode.
  if not io_params['is_training']:
    features['labels'] = mesh_data['labels']
  return features, labels
```
after_content: identical to before_content above (the PR leaves this file unchanged).
label: -1
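To make the pipeline in this row concrete, here is a minimal sketch (not part of the recorded file) that builds the weighted 1-ring adjacency for a toy single-triangle mesh, assuming the module is importable under the filepath shown above:

```python
import numpy as np
import tensorflow as tf
from tensorflow_graphics.notebooks import mesh_segmentation_dataio as dataio

# One triangle over vertices 0, 1, 2; a single-element batch.
faces = np.array([[0, 1, 2]], dtype=np.int32)
edges, weights = dataio.get_weighted_edges(faces)          # [E, 2] and [E]
adj = dataio.adjacency_from_edges(
    edges=edges[np.newaxis, ...],                          # [1, E, 2]
    weights=weights[np.newaxis, ...],                      # [1, E]
    num_edges=np.array([edges.shape[0]], dtype=np.int32),  # [1]
    num_vertices=np.array([3], dtype=np.int32))            # [1]
print(tf.sparse.to_dense(adj))  # [1, 3, 3] weighted 1-ring adjacency
```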
Row 2:
repo_name: tensorflow/graphics
pr_number: 489
pr_title: Enforce `Framebuffer` to accept only tensors with single batch dimension.
pr_description: Enforce `Framebuffer` to accept only tensors with single batch dimension.
author: copybara-service[bot]
date_created: "2021-02-03T21:06:22Z"
date_merged: "2021-02-12T23:59:48Z"
previous_commit: 3a4f1952ed967fb884dc031eeda6dac3fbefbe52
pr_commit: b7a2bf260d6fcf924fddcbb6dba36c72ece66990
query: Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
filepath: ./tensorflow_graphics/projects/cvxnet/lib/utils.py
before_content:

```python
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
from os import path

import numpy as np
import scipy as sp
from skimage import measure
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.cvxnet.lib import datasets
from tensorflow_graphics.projects.cvxnet.lib import models
from tensorflow_graphics.projects.cvxnet.lib.libmise import mise
import trimesh

Stats = collections.namedtuple("Stats", ["iou", "chamfer", "fscore"])
SYSNET_CLASSES = {
    "02691156": "airplane",
    "02933112": "cabinet",
    "03001627": "chair",
    "03636649": "lamp",
    "04090263": "rifle",
    "04379243": "table",
    "04530566": "watercraft",
    "02828884": "bench",
    "02958343": "car",
    "03211117": "display",
    "03691459": "speaker",
    "04256520": "sofa",
    "04401088": "telephone",
    "all": "all",
}


def define_flags():
  """Define command line flags."""
  flags = tf.app.flags

  # Model flags
  flags.DEFINE_enum("model", "multiconvex",
                    list(k for k in models.model_dict.keys()),
                    "Name of the model.")
  flags.DEFINE_float("sharpness", 75., "Sharpness term.")
  flags.DEFINE_integer("n_parts", 50, "Number of convexes used.")
  flags.DEFINE_integer("n_half_planes", 25, "Number of half spaces used.")
  flags.DEFINE_integer("latent_size", 256, "The size of latent code.")
  flags.DEFINE_integer("dims", 3, "The dimension of query points.")
  flags.DEFINE_bool("image_input", False, "Use color images as input if True.")
  flags.DEFINE_float("vis_scale", 1.3,
                     "Scale of bbox used when extracting meshes.")
  flags.DEFINE_float("level_set", 0.5,
                     "Level set used for extracting surfaces.")

  # Dataset flags
  flags.DEFINE_enum("dataset", "shapenet",
                    list(k for k in datasets.dataset_dict.keys()),
                    "Name of the dataset.")
  flags.DEFINE_integer("image_h", 137, "The height of the color images.")
  flags.DEFINE_integer("image_w", 137, "The width of the color images.")
  flags.DEFINE_integer("image_d", 3, "The channels of color images.")
  flags.DEFINE_integer("depth_h", 224, "The height of depth images.")
  flags.DEFINE_integer("depth_w", 224, "The width of depth images.")
  flags.DEFINE_integer("depth_d", 20, "The number of depth views.")
  flags.DEFINE_integer("n_views", 24, "The number of color image views.")
  flags.DEFINE_string("data_dir", None,
                      "The base directory to load data from.")
  flags.mark_flag_as_required("data_dir")
  flags.DEFINE_string("obj_class", "*", "Object class used from dataset.")

  # Training flags
  flags.DEFINE_float("lr", 1e-4, "Start learning rate.")
  flags.DEFINE_string(
      "train_dir", None, "The base directory to save training info and "
      "checkpoints.")
  flags.DEFINE_integer("save_every", 20000,
                       "The number of steps to save checkpoint.")
  flags.DEFINE_integer("max_steps", 800000, "The number of steps of training.")
  flags.DEFINE_integer("batch_size", 32, "Batch size.")
  flags.DEFINE_integer("sample_bbx", 1024,
                       "The number of bounding box sample points.")
  flags.DEFINE_integer("sample_surf", 1024,
                       "The number of surface sample points.")
  flags.DEFINE_float("weight_overlap", 0.1, "Weight of overlap_loss")
  flags.DEFINE_float("weight_balance", 0.01, "Weight of balance_loss")
  flags.DEFINE_float("weight_center", 0.001, "Weight of center_loss")
  flags.mark_flag_as_required("train_dir")

  # Eval flags
  flags.DEFINE_bool("extract_mesh", False,
                    "Extract meshes and save to disk if True.")
  flags.DEFINE_bool("surface_metrics", False,
                    "Measure surface metrics and save to csv if True.")
  flags.DEFINE_string("mesh_dir", None, "Path to load ground truth meshes.")
  flags.DEFINE_string("trans_dir", None,
                      "Path to load pred-to-target transformations.")
  flags.DEFINE_bool("eval_once", False,
                    "Evaluate the model only once if True.")


def mesh_name_helper(name):
  name = name[0].decode("utf-8")
  split = name.find("-")
  cls_name = name[:split]
  obj_name = name[split + 1:]
  return cls_name, obj_name


def extract_mesh(input_val, params, indicators, input_holder, params_holder,
                 points_holder, sess, args):
  """Extracts a mesh from an indicator function.

  Args:
    input_val: np.array, [1, height, width, channel], input image.
    params: tf.Operation, hyperplane parameter hook.
    indicators: tf.Operation, indicator hook.
    input_holder: tf.Placeholder, input image placeholder.
    params_holder: tf.Placeholder, hyperplane parameter placeholder.
    points_holder: tf.Placeholder, query point placeholder.
    sess: tf.Session, running sess.
    args: tf.app.flags.FLAGS, configurations.

  Returns:
    mesh: trimesh.Trimesh, the extracted mesh.
  """
  mesh_extractor = mise.MISE(64, 1, args.level_set)
  points = mesh_extractor.query()
  params_val = sess.run(params, {input_holder: input_val})

  while points.shape[0] != 0:
    orig_points = points
    points = points.astype(np.float32)
    points = (
        (np.expand_dims(points, axis=0) / mesh_extractor.resolution - 0.5) *
        args.vis_scale)
    n_points = points.shape[1]
    values = []
    for i in range(0, n_points, 100000):  # Add this to prevent OOM.
      value = sess.run(indicators, {
          params_holder: params_val,
          points_holder: points[:, i:i + 100000]
      })
      values.append(value)
    values = np.concatenate(values, axis=1)
    values = values[0, :, 0].astype(np.float64)
    mesh_extractor.update(orig_points, values)
    points = mesh_extractor.query()

  value_grid = mesh_extractor.to_dense()
  value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6)
  verts, faces, normals, unused_var = measure.marching_cubes_lewiner(
      value_grid, min(args.level_set, value_grid.max() * 0.75))
  del normals
  verts -= 1
  verts /= np.array(
      [value_grid.shape[0] - 3, value_grid.shape[1] - 3,
       value_grid.shape[2] - 3],
      dtype=np.float32)
  verts = args.vis_scale * (verts - 0.5)
  faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1)
  return trimesh.Trimesh(vertices=verts, faces=faces)


def transform_mesh(mesh, name, trans_dir):
  """Transforms a mesh back to the same coordinates as the ground truth.

  Args:
    mesh: trimesh.Trimesh, predicted mesh before transformation.
    name: Tensor, hash name of the mesh as recorded in the dataset.
    trans_dir: string, path to the directory for loading transformations.

  Returns:
    mesh: trimesh.Trimesh, the transformed mesh.
  """
  if trans_dir is None:
    raise ValueError(
        "Need to specify args.trans_dir for loading pred-to-target "
        "transformations.")
  cls_name, obj_name = mesh_name_helper(name)
  with tf.io.gfile.GFile(
      path.join(trans_dir, "test", cls_name, obj_name, "occnet_to_gaps.txt"),
      "r") as fin:
    tx = np.loadtxt(fin).reshape([4, 4])
  mesh.apply_transform(np.linalg.inv(tx))
  return mesh


def save_mesh(mesh, name, eval_dir):
  """Saves a mesh to disk.

  Args:
    mesh: trimesh.Trimesh, the mesh to save.
    name: Tensor, hash name of the mesh as recorded in the dataset.
    eval_dir: string, path to the directory to save the mesh.
  """
  cls_name, obj_name = mesh_name_helper(name)
  cls_dir = path.join(eval_dir, "meshes", cls_name)
  if not tf.io.gfile.isdir(cls_dir):
    tf.io.gfile.makedirs(cls_dir)
  with tf.io.gfile.GFile(path.join(cls_dir, obj_name + ".obj"), "w") as fout:
    mesh.export(fout, file_type="obj")


def distance_field_helper(source, target):
  target_kdtree = sp.spatial.cKDTree(target)
  distances, unused_var = target_kdtree.query(source, n_jobs=-1)
  return distances


def compute_surface_metrics(mesh, name, mesh_dir):
  """Computes surface metrics (chamfer distance and f-score) for one example.

  Args:
    mesh: trimesh.Trimesh, the mesh to evaluate.
    name: Tensor, hash name of the mesh as recorded in the dataset.
    mesh_dir: string, path to the directory for loading ground truth meshes.

  Returns:
    chamfer: float, chamfer distance.
    fscore: float, f-score.
  """
  if mesh_dir is None:
    raise ValueError("Need to specify args.mesh_dir for loading ground truth.")

  cls_name, obj_name = mesh_name_helper(name)
  with tf.io.gfile.GFile(
      path.join(mesh_dir, "test", cls_name, obj_name, "model_occnet.ply"),
      "rb",
  ) as fin:
    mesh_gt = trimesh.Trimesh(**trimesh.exchange.ply.load_ply(fin))

  # Chamfer
  eval_points = 100000
  point_gt = mesh_gt.sample(eval_points)
  point_gt = point_gt.astype(np.float32)
  point_pred = mesh.sample(eval_points)
  point_pred = point_pred.astype(np.float32)

  pred_to_gt = distance_field_helper(point_pred, point_gt)
  gt_to_pred = distance_field_helper(point_gt, point_pred)

  chamfer = np.mean(pred_to_gt**2) + np.mean(gt_to_pred**2)

  # Fscore
  tau = 1e-4
  eps = 1e-9

  pred_to_gt = (pred_to_gt**2)
  gt_to_pred = (gt_to_pred**2)

  prec_tau = (pred_to_gt <= tau).astype(np.float32).mean() * 100.
  recall_tau = (gt_to_pred <= tau).astype(np.float32).mean() * 100.

  fscore = (2 * prec_tau * recall_tau) / max(prec_tau + recall_tau, eps)

  # Following the tradition to scale chamfer distance up by 10.
  return chamfer * 100., fscore


def init_stats():
  """Initializes evaluation stats."""
  stats = {}
  for k in SYSNET_CLASSES:
    stats[k] = {
        "cnt": 0,
        "iou": 0.,
        "chamfer": 0.,
        "fscore": 0.,
    }
  return stats


def update_stats(example_stats, name, shapenet_stats):
  """Updates evaluation statistics.

  Args:
    example_stats: Stats, the stats of one example.
    name: Tensor, hash name of the example as recorded in the dataset.
    shapenet_stats: dict, the current stats of the whole dataset.
  """
  cls_name, unused_var = mesh_name_helper(name)
  shapenet_stats[cls_name]["cnt"] += 1
  shapenet_stats[cls_name]["iou"] += example_stats.iou
  shapenet_stats[cls_name]["chamfer"] += example_stats.chamfer
  shapenet_stats[cls_name]["fscore"] += example_stats.fscore
  shapenet_stats["all"]["cnt"] += 1
  shapenet_stats["all"]["iou"] += example_stats.iou
  shapenet_stats["all"]["chamfer"] += example_stats.chamfer
  shapenet_stats["all"]["fscore"] += example_stats.fscore


def average_stats(shapenet_stats):
  """Averages the accumulated stats of the whole dataset."""
  for k, v in shapenet_stats.items():
    cnt = max(v["cnt"], 1)
    shapenet_stats[k] = {
        "iou": v["iou"] / cnt,
        "chamfer": v["chamfer"] / cnt,
        "fscore": v["fscore"] / cnt,
    }


def write_stats(stats, eval_dir, step):
  """Writes stats of the dataset to disk.

  Args:
    stats: dict, statistics to save.
    eval_dir: string, path to the directory to save the statistics.
    step: int, the global step of the checkpoint.
  """
  if not tf.io.gfile.isdir(eval_dir):
    tf.io.gfile.makedirs(eval_dir)
  with tf.io.gfile.GFile(path.join(eval_dir, "stats_{}.csv".format(step)),
                         "w") as fout:
    fout.write("class,iou,chamfer,fscore\n")
    for k in sorted(stats.keys()):
      if k == "all":
        continue
      fout.write("{0},{1},{2},{3}\n".format(
          SYSNET_CLASSES[k],
          stats[k]["iou"],
          stats[k]["chamfer"],
          stats[k]["fscore"],
      ))
    fout.write("all,{0},{1},{2}".format(
        stats["all"]["iou"],
        stats["all"]["chamfer"],
        stats["all"]["fscore"],
    ))
```
after_content: identical to before_content above (the PR leaves this file unchanged).
label: -1
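The chamfer / f-score logic in `compute_surface_metrics` above is self-contained enough to restate on raw point clouds. The sketch below mirrors it with plain NumPy/SciPy; the `chamfer_and_fscore` helper is illustrative, not part of the file:

```python
import numpy as np
import scipy.spatial


def chamfer_and_fscore(points_a, points_b, tau=1e-4, eps=1e-9):
  # Nearest-neighbor distances in both directions via KD-trees.
  d_ab = scipy.spatial.cKDTree(points_b).query(points_a)[0]  # A -> B
  d_ba = scipy.spatial.cKDTree(points_a).query(points_b)[0]  # B -> A
  chamfer = np.mean(d_ab**2) + np.mean(d_ba**2)
  # Precision/recall: fraction of squared distances under the threshold tau.
  prec = (d_ab**2 <= tau).mean() * 100.0
  recall = (d_ba**2 <= tau).mean() * 100.0
  fscore = 2 * prec * recall / max(prec + recall, eps)
  return chamfer * 100.0, fscore  # chamfer scaled by 100, as in the file


a = np.random.rand(1000, 3).astype(np.float32)
b = a + 0.001 * np.random.randn(1000, 3).astype(np.float32)
print(chamfer_and_fscore(a, b))  # small chamfer, f-score near 100
```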
Row 3:
repo_name: tensorflow/graphics
pr_number: 489
pr_title: Enforce `Framebuffer` to accept only tensors with single batch dimension.
pr_description: Enforce `Framebuffer` to accept only tensors with single batch dimension.
author: copybara-service[bot]
date_created: "2021-02-03T21:06:22Z"
date_merged: "2021-02-12T23:59:48Z"
previous_commit: 3a4f1952ed967fb884dc031eeda6dac3fbefbe52
pr_commit: b7a2bf260d6fcf924fddcbb6dba36c72ece66990
query: Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
filepath: ./tensorflow_graphics/geometry/representation/mesh/sampler.py
before_content:

```python
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Computes a weighted point sampling of a triangular mesh.

This op computes a uniform sampling of points on the surface of the mesh.
Points are sampled from the surface of each triangle using a uniform
distribution, proportional to a specified face density (e.g. face area).

Uses the approach mentioned in the TOG 2002 paper "Shape distributions"
(https://dl.acm.org/citation.cfm?id=571648) to generate random barycentric
coordinates.

This op can be used for several tasks, including better mesh reconstruction.
For example, see these recent papers demonstrating reconstruction losses using
this op:
1. "GEOMetrics: Exploiting Geometric Structure for Graph-Encoded Objects"
   (https://arxiv.org/abs/1901.11461) ICML 2019.
2. "Mesh R-CNN" (https://arxiv.org/abs/1906.02739) ICCV 2019.

Op is differentiable w.r.t mesh vertex positions.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from tensorflow_graphics.geometry.representation import triangle
from tensorflow_graphics.geometry.representation.mesh import normals
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape


def triangle_area(vertex0, vertex1, vertex2, name="triangle_area"):
  """Computes triangle areas.

  Note:
    Computed triangle area = 0.5 * | e1 x e2 | where e1 and e2 are edges of
    the triangle.

    In the following, A1 to An are optional batch dimensions, which must be
    broadcast compatible.

  Args:
    vertex0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
      represents the first vertex of a triangle.
    vertex1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
      represents the second vertex of a triangle.
    vertex2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
      represents the third vertex of a triangle.
    name: A name for this op. Defaults to "triangle_area".

  Returns:
    A tensor of shape `[A1, ..., An]`, where each element represents a
      triangle area.
  """
  with tf.name_scope(name):
    vertex0 = tf.convert_to_tensor(value=vertex0)
    vertex1 = tf.convert_to_tensor(value=vertex1)
    vertex2 = tf.convert_to_tensor(value=vertex2)

    triangle_normals = triangle.normal(
        vertex0, vertex1, vertex2, normalize=False)
    areas = 0.5 * tf.linalg.norm(tensor=triangle_normals, axis=-1)
    return areas


def _random_categorical_sample(num_samples,
                               weights,
                               seed=None,
                               stateless=False,
                               name="random_categorical_sample",
                               sample_dtype=tf.int32):
  """Samples from a categorical distribution with arbitrary batch dimensions.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    num_samples: An `int32` scalar denoting the number of samples to generate
      per mesh.
    weights: A `float` tensor of shape `[A1, ..., An, F]` where F is number of
      faces. All weights must be > 0.
    seed: Optional random seed, value depends on `stateless`.
    stateless: Optional flag to use stateless random sampler. If
      stateless=True, then `seed` must be provided as shape `[2]` int tensor.
      Stateless random sampling is useful for testing to generate the same
      reproducible sequence across calls. If stateless=False, then a stateful
      random number generator is used (default behavior).
    name: Name for op. Defaults to "random_categorical_sample".
    sample_dtype: Type of output samples.

  Returns:
    A `sample_dtype` tensor of shape `[A1, ..., An, num_samples]`.
  """
  with tf.name_scope(name):
    asserts.assert_all_above(weights, 0)
    logits = tf.math.log(weights)
    num_faces = tf.shape(input=logits)[-1]
    batch_shape = tf.shape(input=logits)[:-1]
    logits_2d = tf.reshape(logits, [-1, num_faces])
    if stateless:
      seed = tf.convert_to_tensor(value=seed)
      shape.check_static(
          tensor=seed, tensor_name="seed", has_dim_equals=(-1, 2))
      sample_fn = tf.random.stateless_categorical
    else:
      sample_fn = tf.random.categorical
    draws = sample_fn(
        logits=logits_2d,
        num_samples=num_samples,
        dtype=sample_dtype,
        seed=seed)
    samples = tf.reshape(
        draws, shape=tf.concat((batch_shape, (num_samples,)), axis=0))
    return samples


def generate_random_face_indices(num_samples,
                                 face_weights,
                                 seed=None,
                                 stateless=False,
                                 name="generate_random_face_indices"):
  """Generates a sample of face ids given per face probability.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    num_samples: An `int32` scalar denoting the number of samples to generate
      per mesh.
    face_weights: A `float` tensor of shape `[A1, ..., An, F]` where F is
      number of faces. All weights must be > 0.
    seed: Optional seed for the random number generator.
    stateless: Optional flag to use stateless random sampler. If
      stateless=True, then `seed` must be provided as shape `[2]` int tensor.
      Stateless random sampling is useful for testing to generate the same
      reproducible sequence across calls. If stateless=False, then a stateful
      random number generator is used (default behavior).
    name: Name for op. Defaults to "generate_random_face_indices".

  Returns:
    An `int32` tensor of shape `[A1, ..., An, num_samples]` denoting sampled
      face indices.
  """
  with tf.name_scope(name):
    num_samples = tf.convert_to_tensor(value=num_samples)
    face_weights = tf.convert_to_tensor(value=face_weights)
    shape.check_static(
        tensor=face_weights,
        tensor_name="face_weights",
        has_rank_greater_than=0)
    shape.check_static(
        tensor=num_samples, tensor_name="num_samples", has_rank=0)

    face_weights = asserts.assert_all_above(face_weights, minval=0.0)
    eps = asserts.select_eps_for_division(face_weights.dtype)
    face_weights = face_weights + eps
    sampled_face_indices = _random_categorical_sample(
        num_samples=num_samples,
        weights=face_weights,
        seed=seed,
        stateless=stateless)
    return sampled_face_indices


def generate_random_barycentric_coordinates(
    sample_shape,
    dtype=tf.dtypes.float32,
    seed=None,
    stateless=False,
    name="generate_random_barycentric_coordinates"):
  """Generates uniformly sampled random barycentric coordinates.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    sample_shape: An `int` tensor with shape `[n+1,]` and values
      `(A1, ..., An, num_samples)` denoting total number of random samples
      drawn, where `n` is number of batch dimensions, and `num_samples` is
      the number of samples drawn for each mesh.
    dtype: Optional type of generated barycentric coordinates, defaults to
      float32.
    seed: An optional random seed.
    stateless: Optional flag to use stateless random sampler. If
      stateless=True, then `seed` must be provided as shape `[2]` int tensor.
      Stateless random sampling is useful for testing to generate the same
      reproducible sequence across calls. If stateless=False, then a stateful
      random number generator is used (default behavior).
    name: Name for op. Defaults to
      "generate_random_barycentric_coordinates".

  Returns:
    A `dtype` tensor of shape [A1, ..., An, num_samples, 3], where the last
      dimension contains the sampled barycentric coordinates.
  """
  with tf.name_scope(name):
    sample_shape = tf.convert_to_tensor(value=sample_shape)
    shape.check_static(
        tensor=sample_shape, tensor_name="sample_shape", has_rank=1)
    sample_shape = tf.concat((sample_shape, (2,)), axis=0)

    if stateless:
      seed = tf.convert_to_tensor(value=seed)
      shape.check_static(
          tensor=seed, tensor_name="seed", has_dim_equals=(-1, 2))
      sample_fn = tf.random.stateless_uniform
    else:
      sample_fn = tf.random.uniform
    random_uniform = sample_fn(
        shape=sample_shape, minval=0.0, maxval=1.0, dtype=dtype, seed=seed)
    random1 = tf.sqrt(random_uniform[..., 0])
    random2 = random_uniform[..., 1]
    barycentric = tf.stack(
        (1 - random1, random1 * (1 - random2), random1 * random2), axis=-1)
    return barycentric


def weighted_random_sample_triangle_mesh(
    vertex_attributes,
    faces,
    num_samples,
    face_weights,
    seed=None,
    stateless=False,
    name="weighted_random_sample_triangle_mesh"):
  """Performs a face probability weighted random sampling of a tri mesh.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    vertex_attributes: A `float` tensor of shape `[A1, ..., An, V, D]`, where
      V is the number of vertices, and D is dimensionality of each vertex.
    faces: An `int` tensor of shape `[A1, ..., An, F, 3]`, where F is the
      number of faces.
    num_samples: An `int` 0-D tensor denoting number of samples to be drawn
      from each mesh.
    face_weights: A `float` tensor of shape `[A1, ..., An, F]`, denoting
      unnormalized sampling probability of each face, where F is the number
      of faces.
    seed: Optional random seed.
    stateless: Optional flag to use stateless random sampler. If
      stateless=True, then seed must be provided as shape `[2]` int tensor.
      Stateless random sampling is useful for testing to generate same
      sequence across calls.
    name: Name for op. Defaults to "weighted_random_sample_triangle_mesh".

  Returns:
    sample_points: A `float` tensor of shape `[A1, ..., An, num_samples, D]`,
      where D is dimensionality of each sampled point.
    sample_face_indices: An `int` tensor of shape
      `[A1, ..., An, num_samples]`.
  """
  with tf.name_scope(name):
    faces = tf.convert_to_tensor(value=faces)
    vertex_attributes = tf.convert_to_tensor(value=vertex_attributes)
    face_weights = tf.convert_to_tensor(value=face_weights)
    num_samples = tf.convert_to_tensor(value=num_samples)

    shape.check_static(
        tensor=vertex_attributes,
        tensor_name="vertex_attributes",
        has_rank_greater_than=1)
    shape.check_static(
        tensor=faces, tensor_name="faces", has_rank_greater_than=1)
    shape.check_static(
        tensor=face_weights,
        tensor_name="face_weights",
        has_rank_greater_than=0)
    shape.compare_batch_dimensions(
        tensors=(faces, face_weights),
        last_axes=(-2, -1),
        tensor_names=("faces", "face_weights"),
        broadcast_compatible=False)
    shape.compare_batch_dimensions(
        tensors=(vertex_attributes, faces, face_weights),
        last_axes=(-3, -3, -2),
        tensor_names=("vertex_attributes", "faces", "face_weights"),
        broadcast_compatible=False)

    asserts.assert_all_above(face_weights, 0)

    batch_dims = faces.shape.ndims - 2
    batch_shape = faces.shape.as_list()[:-2]
    sample_shape = tf.concat(
        (batch_shape, tf.convert_to_tensor(
            value=(num_samples,), dtype=tf.int32)),
        axis=0)

    sample_face_indices = generate_random_face_indices(
        num_samples, face_weights, seed=seed, stateless=stateless)
    sample_vertex_indices = tf.gather(
        faces, sample_face_indices, batch_dims=batch_dims)
    sample_vertices = tf.gather(
        vertex_attributes, sample_vertex_indices, batch_dims=batch_dims)
    barycentric = generate_random_barycentric_coordinates(
        sample_shape,
        dtype=vertex_attributes.dtype,
        seed=seed,
        stateless=stateless)
    barycentric = tf.expand_dims(barycentric, axis=-1)
    sample_points = tf.math.multiply(sample_vertices, barycentric)
    sample_points = tf.reduce_sum(input_tensor=sample_points, axis=-2)
    return sample_points, sample_face_indices


def area_weighted_random_sample_triangle_mesh(
    vertex_attributes,
    faces,
    num_samples,
    vertex_positions=None,
    seed=None,
    stateless=False,
    name="area_weighted_random_sample_triangle_mesh"):
  """Performs a face area weighted random sampling of a tri mesh.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    vertex_attributes: A `float` tensor of shape `[A1, ..., An, V, D]`, where
      V is the number of vertices, and D is dimensionality of a feature
      defined on each vertex. If `vertex_positions` is not provided, then the
      first 3 dimensions of `vertex_attributes` denote the vertex positions.
    faces: An `int` tensor of shape `[A1, ..., An, F, 3]`, where F is the
      number of faces.
    num_samples: An `int` scalar denoting number of samples to be drawn from
      each mesh.
    vertex_positions: An optional `float` tensor of shape
      `[A1, ..., An, V, 3]`, where V is the number of vertices. If None, then
      vertex_attributes[..., :3] is used as vertex positions.
    seed: Optional random seed.
    stateless: Optional flag to use stateless random sampler. If
      stateless=True, then seed must be provided as shape `[2]` int tensor.
      Stateless random sampling is useful for testing to generate same
      sequence across calls.
    name: Name for op. Defaults to
      "area_weighted_random_sample_triangle_mesh".

  Returns:
    sample_pts: A `float` tensor of shape `[A1, ..., An, num_samples, D]`,
      where D is dimensionality of each sampled point.
    sample_face_indices: An `int` tensor of shape
      `[A1, ..., An, num_samples]`.
  """
  with tf.name_scope(name):
    faces = tf.convert_to_tensor(value=faces)
    vertex_attributes = tf.convert_to_tensor(value=vertex_attributes)
    num_samples = tf.convert_to_tensor(value=num_samples)

    shape.check_static(
        tensor=vertex_attributes,
        tensor_name="vertex_attributes",
        has_rank_greater_than=1)
    shape.check_static(
        tensor=vertex_attributes,
        tensor_name="vertex_attributes",
        has_dim_greater_than=(-1, 2))

    if vertex_positions is not None:
      vertex_positions = tf.convert_to_tensor(value=vertex_positions)
    else:
      vertex_positions = vertex_attributes[..., :3]

    shape.check_static(
        tensor=vertex_positions,
        tensor_name="vertex_positions",
        has_rank_greater_than=1)
    shape.check_static(
        tensor=vertex_positions,
        tensor_name="vertex_positions",
        has_dim_equals=(-1, 3))

    triangle_vertex_positions = normals.gather_faces(vertex_positions, faces)
    triangle_areas = triangle_area(triangle_vertex_positions[..., 0, :],
                                   triangle_vertex_positions[..., 1, :],
                                   triangle_vertex_positions[..., 2, :])
    return weighted_random_sample_triangle_mesh(
        vertex_attributes,
        faces,
        num_samples,
        face_weights=triangle_areas,
        seed=seed,
        stateless=stateless)


# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
```
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Computes a weighted point sampling of a triangular mesh.

This op computes a uniform sampling of points on the surface of the mesh.
Points are sampled from the surface of each triangle using a uniform
distribution, proportional to a specified face density (e.g. face area).

Uses the approach mentioned in the TOG 2002 paper "Shape distributions"
(https://dl.acm.org/citation.cfm?id=571648) to generate random barycentric
coordinates.

This op can be used for several tasks, including better mesh reconstruction.
For example, see these recent papers demonstrating reconstruction losses using
this op:
1. "GEOMetrics: Exploiting Geometric Structure for Graph-Encoded Objects"
   (https://arxiv.org/abs/1901.11461) ICML 2019.
2. "Mesh R-CNN" (https://arxiv.org/abs/1906.02739) ICCV 2019.

This op is differentiable w.r.t. mesh vertex positions.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from tensorflow_graphics.geometry.representation import triangle
from tensorflow_graphics.geometry.representation.mesh import normals
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape


def triangle_area(vertex0, vertex1, vertex2, name="triangle_area"):
  """Computes triangle areas.

  Note:
    Computed triangle area = 0.5 * | e1 x e2 | where e1 and e2 are edges of
    the triangle.

    In the following, A1 to An are optional batch dimensions, which must be
    broadcast compatible.

  Args:
    vertex0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
      represents the first vertex of a triangle.
    vertex1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
      represents the second vertex of a triangle.
    vertex2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
      represents the third vertex of a triangle.
    name: A name for this op. Defaults to "triangle_area".

  Returns:
    A tensor of shape `[A1, ..., An]`, where each element represents the area
    of the corresponding triangle.
  """
  with tf.name_scope(name):
    vertex0 = tf.convert_to_tensor(value=vertex0)
    vertex1 = tf.convert_to_tensor(value=vertex1)
    vertex2 = tf.convert_to_tensor(value=vertex2)

    triangle_normals = triangle.normal(
        vertex0, vertex1, vertex2, normalize=False)
    areas = 0.5 * tf.linalg.norm(tensor=triangle_normals, axis=-1)
    return areas


def _random_categorical_sample(num_samples,
                               weights,
                               seed=None,
                               stateless=False,
                               name="random_categorical_sample",
                               sample_dtype=tf.int32):
  """Samples from a categorical distribution with arbitrary batch dimensions.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    num_samples: An `int32` scalar denoting the number of samples to generate
      per mesh.
    weights: A `float` tensor of shape `[A1, ..., An, F]` where F is the number
      of faces. All weights must be > 0.
    seed: Optional random seed, value depends on `stateless`.
stateless: Optional flag to use stateless random sampler. If stateless=True, then `seed` must be provided as shape `[2]` int tensor. Stateless random sampling is useful for testing to generate the same reproducible sequence across calls. If stateless=False, then a stateful random number generator is used (default behavior). name: Name for op. Defaults to "random_categorical_sample". sample_dtype: Type of output samples. Returns: A `sample_dtype` tensor of shape `[A1, ..., An, num_samples]`. """ with tf.name_scope(name): asserts.assert_all_above(weights, 0) logits = tf.math.log(weights) num_faces = tf.shape(input=logits)[-1] batch_shape = tf.shape(input=logits)[:-1] logits_2d = tf.reshape(logits, [-1, num_faces]) if stateless: seed = tf.convert_to_tensor(value=seed) shape.check_static( tensor=seed, tensor_name="seed", has_dim_equals=(-1, 2)) sample_fn = tf.random.stateless_categorical else: sample_fn = tf.random.categorical draws = sample_fn( logits=logits_2d, num_samples=num_samples, dtype=sample_dtype, seed=seed) samples = tf.reshape( draws, shape=tf.concat((batch_shape, (num_samples,)), axis=0)) return samples def generate_random_face_indices(num_samples, face_weights, seed=None, stateless=False, name="generate_random_face_indices"): """Generate a sample of face ids given per face probability. Note: In the following, A1 to An are optional batch dimensions. Args: num_samples: An `int32` scalar denoting the number of samples to generate per mesh. face_weights: A `float` tensor of shape `[A1, ..., An, F]` where F is number of faces. All weights must be > 0. seed: Optional seed for the random number generator. stateless: Optional flag to use stateless random sampler. If stateless=True, then `seed` must be provided as shape `[2]` int tensor. Stateless random sampling is useful for testing to generate the same reproducible sequence across calls. If stateless=False, then a stateful random number generator is used (default behavior). name: Name for op. Defaults to "generate_random_face_indices". Returns: An `int32` tensor of shape `[A1, ..., An, num_samples]` denoting sampled face indices. """ with tf.name_scope(name): num_samples = tf.convert_to_tensor(value=num_samples) face_weights = tf.convert_to_tensor(value=face_weights) shape.check_static( tensor=face_weights, tensor_name="face_weights", has_rank_greater_than=0) shape.check_static( tensor=num_samples, tensor_name="num_samples", has_rank=0) face_weights = asserts.assert_all_above(face_weights, minval=0.0) eps = asserts.select_eps_for_division(face_weights.dtype) face_weights = face_weights + eps sampled_face_indices = _random_categorical_sample( num_samples=num_samples, weights=face_weights, seed=seed, stateless=stateless) return sampled_face_indices def generate_random_barycentric_coordinates( sample_shape, dtype=tf.dtypes.float32, seed=None, stateless=False, name="generate_random_barycentric_coordinates"): """Generate uniformly sampled random barycentric coordinates. Note: In the following, A1 to An are optional batch dimensions. Args: sample_shape: An `int` tensor with shape `[n+1,]` and values `(A1, ..., An, num_samples)` denoting total number of random samples drawn, where `n` is number of batch dimensions, and `num_samples` is the number of samples drawn for each mesh. dtype: Optional type of generated barycentric coordinates, defaults to float32. seed: An optional random seed. stateless: Optional flag to use stateless random sampler. If stateless=True, then `seed` must be provided as shape `[2]` int tensor. 
Stateless random sampling is useful for testing to generate the same reproducible sequence across calls. If stateless=False, then a stateful random number generator is used (default behavior). name: Name for op. Defaults to "generate_random_barycentric_coordinates". Returns: A `dtype` tensor of shape [A1, ..., An, num_samples, 3], where the last dimension contains the sampled barycentric coordinates. """ with tf.name_scope(name): sample_shape = tf.convert_to_tensor(value=sample_shape) shape.check_static( tensor=sample_shape, tensor_name="sample_shape", has_rank=1) sample_shape = tf.concat((sample_shape, (2,)), axis=0) if stateless: seed = tf.convert_to_tensor(value=seed) shape.check_static( tensor=seed, tensor_name="seed", has_dim_equals=(-1, 2)) sample_fn = tf.random.stateless_uniform else: sample_fn = tf.random.uniform random_uniform = sample_fn( shape=sample_shape, minval=0.0, maxval=1.0, dtype=dtype, seed=seed) random1 = tf.sqrt(random_uniform[..., 0]) random2 = random_uniform[..., 1] barycentric = tf.stack( (1 - random1, random1 * (1 - random2), random1 * random2), axis=-1) return barycentric def weighted_random_sample_triangle_mesh( vertex_attributes, faces, num_samples, face_weights, seed=None, stateless=False, name="weighted_random_sample_triangle_mesh"): """Performs a face probability weighted random sampling of a tri mesh. Note: In the following, A1 to An are optional batch dimensions. Args: vertex_attributes: A `float` tensor of shape `[A1, ..., An, V, D]`, where V is the number of vertices, and D is dimensionality of each vertex. faces: A `int` tensor of shape `[A1, ..., An, F, 3]`, where F is the number of faces. num_samples: A `int` 0-D tensor denoting number of samples to be drawn from each mesh. face_weights: A `float` tensor of shape ``[A1, ..., An, F]`, denoting unnormalized sampling probability of each face, where F is the number of faces. seed: Optional random seed. stateless: Optional flag to use stateless random sampler. If stateless=True, then seed must be provided as shape `[2]` int tensor. Stateless random sampling is useful for testing to generate same sequence across calls. name: Name for op. Defaults to "weighted_random_sample_triangle_mesh". Returns: sample_points: A `float` tensor of shape `[A1, ..., An, num_samples, D]`, where D is dimensionality of each sampled point. sample_face_indices: A `int` tensor of shape `[A1, ..., An, num_samples]`. 
""" with tf.name_scope(name): faces = tf.convert_to_tensor(value=faces) vertex_attributes = tf.convert_to_tensor(value=vertex_attributes) face_weights = tf.convert_to_tensor(value=face_weights) num_samples = tf.convert_to_tensor(value=num_samples) shape.check_static( tensor=vertex_attributes, tensor_name="vertex_attributes", has_rank_greater_than=1) shape.check_static( tensor=faces, tensor_name="faces", has_rank_greater_than=1) shape.check_static( tensor=face_weights, tensor_name="face_weights", has_rank_greater_than=0) shape.compare_batch_dimensions( tensors=(faces, face_weights), last_axes=(-2, -1), tensor_names=("faces", "face_weights"), broadcast_compatible=False) shape.compare_batch_dimensions( tensors=(vertex_attributes, faces, face_weights), last_axes=(-3, -3, -2), tensor_names=("vertex_attributes", "faces", "face_weights"), broadcast_compatible=False) asserts.assert_all_above(face_weights, 0) batch_dims = faces.shape.ndims - 2 batch_shape = faces.shape.as_list()[:-2] sample_shape = tf.concat( (batch_shape, tf.convert_to_tensor( value=(num_samples,), dtype=tf.int32)), axis=0) sample_face_indices = generate_random_face_indices( num_samples, face_weights, seed=seed, stateless=stateless) sample_vertex_indices = tf.gather( faces, sample_face_indices, batch_dims=batch_dims) sample_vertices = tf.gather( vertex_attributes, sample_vertex_indices, batch_dims=batch_dims) barycentric = generate_random_barycentric_coordinates( sample_shape, dtype=vertex_attributes.dtype, seed=seed, stateless=stateless) barycentric = tf.expand_dims(barycentric, axis=-1) sample_points = tf.math.multiply(sample_vertices, barycentric) sample_points = tf.reduce_sum(input_tensor=sample_points, axis=-2) return sample_points, sample_face_indices def area_weighted_random_sample_triangle_mesh( vertex_attributes, faces, num_samples, vertex_positions=None, seed=None, stateless=False, name="area_weighted_random_sample_triangle_mesh"): """Performs a face area weighted random sampling of a tri mesh. Note: In the following, A1 to An are optional batch dimensions. Args: vertex_attributes: A `float` tensor of shape `[A1, ..., An, V, D]`, where V is the number of vertices, and D is dimensionality of a feature defined on each vertex. If `vertex_positions` is not provided, then first 3 dimensions of `vertex_attributes` denote the vertex positions. faces: A `int` tensor of shape `[A1, ..., An, F, 3]`, where F is the number of faces. num_samples: An `int` scalar denoting number of samples to be drawn from each mesh. vertex_positions: An optional `float` tensor of shape `[A1, ..., An, V, 3]`, where V is the number of vertices. If None, then vertex_attributes[..., :3] is used as vertex positions. seed: Optional random seed. stateless: Optional flag to use stateless random sampler. If stateless=True, then seed must be provided as shape `[2]` int tensor. Stateless random sampling is useful for testing to generate same sequence across calls. name: Name for op. Defaults to "area_weighted_random_sample_triangle_mesh". Returns: sample_pts: A `float` tensor of shape `[A1, ..., An, num_samples, D]`, where D is dimensionality of each sampled point. sample_face_indices: A `int` tensor of shape `[A1, ..., An, num_samples]`. 
""" with tf.name_scope(name): faces = tf.convert_to_tensor(value=faces) vertex_attributes = tf.convert_to_tensor(value=vertex_attributes) num_samples = tf.convert_to_tensor(value=num_samples) shape.check_static( tensor=vertex_attributes, tensor_name="vertex_attributes", has_rank_greater_than=1) shape.check_static( tensor=vertex_attributes, tensor_name="vertex_attributes", has_dim_greater_than=(-1, 2)) if vertex_positions is not None: vertex_positions = tf.convert_to_tensor(value=vertex_positions) else: vertex_positions = vertex_attributes[..., :3] shape.check_static( tensor=vertex_positions, tensor_name="vertex_positions", has_rank_greater_than=1) shape.check_static( tensor=vertex_positions, tensor_name="vertex_positions", has_dim_equals=(-1, 3)) triangle_vertex_positions = normals.gather_faces(vertex_positions, faces) triangle_areas = triangle_area(triangle_vertex_positions[..., 0, :], triangle_vertex_positions[..., 1, :], triangle_vertex_positions[..., 2, :]) return weighted_random_sample_triangle_mesh( vertex_attributes, faces, num_samples, face_weights=triangle_areas, seed=seed, stateless=stateless) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/geometry/deformation_energy/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Deformation energies module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_graphics.geometry.deformation_energy import as_conformal_as_possible from tensorflow_graphics.util import export_api as _export_api # API contains submodules of tensorflow_graphics.deformation_energy. __all__ = _export_api.get_modules()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Deformation energies module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_graphics.geometry.deformation_energy import as_conformal_as_possible from tensorflow_graphics.util import export_api as _export_api # API contains submodules of tensorflow_graphics.deformation_energy. __all__ = _export_api.get_modules()
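# --- Usage sketch (hedged, not from the PR): the __init__ above only
# re-exports the submodule, so client code reaches the as-conformal-as-possible
# deformation energy through it. The exact signatures are not shown in this
# file, hence the help() call instead of a fabricated example.
from tensorflow_graphics.geometry.deformation_energy import (
    as_conformal_as_possible)

help(as_conformal_as_possible)  # prints the exported energy entry points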
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/datasets/features/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """`tensorflow_graphics.datasets.features` API defining feature types.""" from tensorflow_graphics.datasets.features.camera_feature import Camera from tensorflow_graphics.datasets.features.pose_feature import Pose from tensorflow_graphics.datasets.features.trimesh_feature import TriangleMesh from tensorflow_graphics.datasets.features.voxel_feature import VoxelGrid __all__ = [ "TriangleMesh", "VoxelGrid", "Camera", "Pose" ]
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """`tensorflow_graphics.datasets.features` API defining feature types.""" from tensorflow_graphics.datasets.features.camera_feature import Camera from tensorflow_graphics.datasets.features.pose_feature import Pose from tensorflow_graphics.datasets.features.trimesh_feature import TriangleMesh from tensorflow_graphics.datasets.features.voxel_feature import VoxelGrid __all__ = [ "TriangleMesh", "VoxelGrid", "Camera", "Pose" ]
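# --- Usage sketch (hedged, not from the PR): the exported types are
# tensorflow_datasets feature connectors, so they compose with a standard
# FeaturesDict. TriangleMesh() taking no required arguments is an assumption
# made for illustration.
import tensorflow_datasets as tfds
from tensorflow_graphics.datasets import features

features_dict = tfds.features.FeaturesDict({
    "mesh": features.TriangleMesh(),                    # vertices + faces
    "label": tfds.features.ClassLabel(num_classes=40),  # e.g. ModelNet40
})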
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/projects/pointnet/train.py
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop for PointNet v1 on modelnet40."""
# pylint: disable=missing-function-docstring
import tensorflow as tf
from tensorflow_graphics.datasets import modelnet40
from tensorflow_graphics.nn.layer import pointnet
import tqdm  # pylint: disable=g-bad-import-order
from . import augment  # pylint: disable=g-bad-import-order
from . import helpers  # pylint: disable=g-bad-import-order

# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
parser = helpers.ArgumentParser()
parser.add("--batch_size", 32)
parser.add("--num_epochs", 250)
parser.add("--num_points", 2048, help="subsampled (max 2048)")
parser.add("--learning_rate", 1e-3, help="initial Adam learning rate")
parser.add("--lr_decay", True, help="enable learning rate decay")
parser.add("--bn_decay", .5, help="batch norm decay momentum")
parser.add("--tb_every", 100, help="tensorboard frequency (iterations)")
parser.add("--ev_every", 308, help="evaluation frequency (iterations)")
parser.add("--augment", True, help="use augmentations")
parser.add("--tqdm", True, help="enable the progress bar")
FLAGS = parser.parse_args()

# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
if FLAGS.lr_decay:
  lr_scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
      FLAGS.learning_rate,
      decay_steps=6250,  #< 200,000 / 32 (batch size) (from original pointnet)
      decay_rate=0.7,
      staircase=True)
  optimizer = tf.keras.optimizers.Adam(learning_rate=lr_scheduler)
else:
  optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)

# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
model = pointnet.PointNetVanillaClassifier(
    num_classes=40, momentum=FLAGS.bn_decay)

# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
@tf.function
def wrapped_tf_function(points, label):
  """Performs one step of minimization of the loss."""
  # --- subsampling (order DOES matter)
  points = points[0:FLAGS.num_points, ...]
# --- augmentation if FLAGS.augment: points = tf.map_fn(augment.rotate, points) points = augment.jitter(points) # --- training with tf.GradientTape() as tape: logits = model(points, training=True) loss = model.loss(label, logits) variables = model.trainable_variables gradients = tape.gradient(loss, variables) optimizer.apply_gradients(zip(gradients, variables)) return loss def train(example): """Performs one step of minimization of the loss and populates the summary.""" points = example["points"] label = example["label"] step = optimizer.iterations.numpy() # --- optimize loss = wrapped_tf_function(points, label) if step % FLAGS.tb_every == 0: tf.summary.scalar(name="loss", data=loss, step=step) # --- report rate in summaries if FLAGS.lr_decay and step % FLAGS.tb_every == 0: tf.summary.scalar(name="learning_rate", data=lr_scheduler(step), step=step) # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ def evaluate(): """Identify the best accuracy reached during training.""" step = optimizer.iterations.numpy() if "best_accuracy" not in evaluate.__dict__: evaluate.best_accuracy = 0 if step % FLAGS.ev_every != 0: return evaluate.best_accuracy aggregator = tf.keras.metrics.SparseCategoricalAccuracy() for example in ds_test: points, labels = example["points"], example["label"] logits = model(points, training=False) aggregator.update_state(labels, logits) accuracy = aggregator.result() evaluate.best_accuracy = max(accuracy, evaluate.best_accuracy) tf.summary.scalar(name="accuracy_test", data=accuracy, step=step) return evaluate.best_accuracy # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ ds_train, info = modelnet40.ModelNet40.load(split="train", with_info=True) num_examples = info.splits["train"].num_examples ds_train = ds_train.shuffle(num_examples, reshuffle_each_iteration=True) ds_train = ds_train.repeat(FLAGS.num_epochs) ds_train = ds_train.batch(FLAGS.batch_size) ds_test = modelnet40.ModelNet40.load(split="test").batch(FLAGS.batch_size) # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ try: helpers.setup_tensorboard(FLAGS) helpers.summary_command(parser, FLAGS) total = tf.data.experimental.cardinality(ds_train).numpy() pbar = tqdm.tqdm(ds_train, leave=False, total=total, disable=not FLAGS.tqdm) for train_example in pbar: train(train_example) best_accuracy = evaluate() pbar.set_postfix_str("best accuracy: {:.3f}".format(best_accuracy)) except KeyboardInterrupt: helpers.handle_keyboard_interrupt(FLAGS)
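# --- Sketch (hedged, not part of train.py): the schedule configured above
# multiplies the learning rate by 0.7 every 6250 steps, i.e. roughly once per
# 200,000 examples at batch size 32, and staircase=True holds it flat between
# drops.
import tensorflow as tf

lr = tf.keras.optimizers.schedules.ExponentialDecay(
    1e-3, decay_steps=6250, decay_rate=0.7, staircase=True)
print(float(lr(0)), float(lr(6249)), float(lr(6250)))  # 0.001 0.001 ~0.0007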
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop for PointNet v1 on modelnet40."""
# pylint: disable=missing-function-docstring
import tensorflow as tf
from tensorflow_graphics.datasets import modelnet40
from tensorflow_graphics.nn.layer import pointnet
import tqdm  # pylint: disable=g-bad-import-order
from . import augment  # pylint: disable=g-bad-import-order
from . import helpers  # pylint: disable=g-bad-import-order

# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
parser = helpers.ArgumentParser()
parser.add("--batch_size", 32)
parser.add("--num_epochs", 250)
parser.add("--num_points", 2048, help="subsampled (max 2048)")
parser.add("--learning_rate", 1e-3, help="initial Adam learning rate")
parser.add("--lr_decay", True, help="enable learning rate decay")
parser.add("--bn_decay", .5, help="batch norm decay momentum")
parser.add("--tb_every", 100, help="tensorboard frequency (iterations)")
parser.add("--ev_every", 308, help="evaluation frequency (iterations)")
parser.add("--augment", True, help="use augmentations")
parser.add("--tqdm", True, help="enable the progress bar")
FLAGS = parser.parse_args()

# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
if FLAGS.lr_decay:
  lr_scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
      FLAGS.learning_rate,
      decay_steps=6250,  #< 200,000 / 32 (batch size) (from original pointnet)
      decay_rate=0.7,
      staircase=True)
  optimizer = tf.keras.optimizers.Adam(learning_rate=lr_scheduler)
else:
  optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)

# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
model = pointnet.PointNetVanillaClassifier(
    num_classes=40, momentum=FLAGS.bn_decay)

# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
@tf.function
def wrapped_tf_function(points, label):
  """Performs one step of minimization of the loss."""
  # --- subsampling (order DOES matter)
  points = points[0:FLAGS.num_points, ...]
# --- augmentation if FLAGS.augment: points = tf.map_fn(augment.rotate, points) points = augment.jitter(points) # --- training with tf.GradientTape() as tape: logits = model(points, training=True) loss = model.loss(label, logits) variables = model.trainable_variables gradients = tape.gradient(loss, variables) optimizer.apply_gradients(zip(gradients, variables)) return loss def train(example): """Performs one step of minimization of the loss and populates the summary.""" points = example["points"] label = example["label"] step = optimizer.iterations.numpy() # --- optimize loss = wrapped_tf_function(points, label) if step % FLAGS.tb_every == 0: tf.summary.scalar(name="loss", data=loss, step=step) # --- report rate in summaries if FLAGS.lr_decay and step % FLAGS.tb_every == 0: tf.summary.scalar(name="learning_rate", data=lr_scheduler(step), step=step) # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ def evaluate(): """Identify the best accuracy reached during training.""" step = optimizer.iterations.numpy() if "best_accuracy" not in evaluate.__dict__: evaluate.best_accuracy = 0 if step % FLAGS.ev_every != 0: return evaluate.best_accuracy aggregator = tf.keras.metrics.SparseCategoricalAccuracy() for example in ds_test: points, labels = example["points"], example["label"] logits = model(points, training=False) aggregator.update_state(labels, logits) accuracy = aggregator.result() evaluate.best_accuracy = max(accuracy, evaluate.best_accuracy) tf.summary.scalar(name="accuracy_test", data=accuracy, step=step) return evaluate.best_accuracy # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ ds_train, info = modelnet40.ModelNet40.load(split="train", with_info=True) num_examples = info.splits["train"].num_examples ds_train = ds_train.shuffle(num_examples, reshuffle_each_iteration=True) ds_train = ds_train.repeat(FLAGS.num_epochs) ds_train = ds_train.batch(FLAGS.batch_size) ds_test = modelnet40.ModelNet40.load(split="test").batch(FLAGS.batch_size) # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------ try: helpers.setup_tensorboard(FLAGS) helpers.summary_command(parser, FLAGS) total = tf.data.experimental.cardinality(ds_train).numpy() pbar = tqdm.tqdm(ds_train, leave=False, total=total, disable=not FLAGS.tqdm) for train_example in pbar: train(train_example) best_accuracy = evaluate() pbar.set_postfix_str("best accuracy: {:.3f}".format(best_accuracy)) except KeyboardInterrupt: helpers.handle_keyboard_interrupt(FLAGS)
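# --- Sketch (hedged, not part of train.py): evaluate() above keeps its best
# accuracy as a function attribute instead of a global; the same pattern in
# isolation, with a hypothetical helper name:
def running_best(value):
  if "best" not in running_best.__dict__:
    running_best.best = value
  running_best.best = max(running_best.best, value)
  return running_best.best

assert running_best(0.3) == 0.3
assert running_best(0.1) == 0.3  # the earlier best is retained across calls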
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/nn/layer/tests/graph_convolution_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the graph convolution layers.""" from absl.testing import parameterized import numpy as np import tensorflow as tf import tensorflow_graphics.nn.layer.graph_convolution as gc_layer from tensorflow_graphics.util import test_case def _dense_to_sparse(data): """Convert a numpy array to a tf.SparseTensor.""" indices = np.where(data) return tf.SparseTensor( np.stack(indices, axis=-1), data[indices], dense_shape=data.shape) def _dummy_data(batch_size, num_vertices, num_channels): """Create inputs for feature_steered_convolution.""" if batch_size > 0: data = np.zeros( shape=(batch_size, num_vertices, num_channels), dtype=np.float32) neighbors = _dense_to_sparse( np.tile(np.eye(num_vertices, dtype=np.float32), (batch_size, 1, 1))) else: data = np.zeros(shape=(num_vertices, num_channels), dtype=np.float32) neighbors = _dense_to_sparse(np.eye(num_vertices, dtype=np.float32)) return data, neighbors class GraphConvolutionTestFeatureSteeredConvolutionLayerTests( test_case.TestCase): @parameterized.parameters( (1, 1, 1, 1, 1, False), (4, 2, 3, None, 5, False), (1, 2, 3, 4, 5, True), ) def test_feature_steered_convolution_layer_exception_not_raised_shapes( self, batch_size, num_vertices, in_channels, out_channels, num_weight_matrices, translation_invariant): """Check if the convolution parameters and output have correct shapes.""" data, neighbors = _dummy_data(batch_size, num_vertices, in_channels) name_scope = "test" if tf.executing_eagerly(): layer = gc_layer.FeatureSteeredConvolutionKerasLayer( translation_invariant=translation_invariant, num_weight_matrices=num_weight_matrices, num_output_channels=out_channels, name=name_scope) def _run_convolution(): """Run the appropriate feature steered convolution layer.""" if tf.executing_eagerly(): try: output = layer(inputs=[data, neighbors], sizes=None) except Exception as e: # pylint: disable=broad-except self.fail("Exception raised: %s" % str(e)) else: try: output = gc_layer.feature_steered_convolution_layer( data=data, neighbors=neighbors, sizes=None, translation_invariant=translation_invariant, num_weight_matrices=num_weight_matrices, num_output_channels=out_channels, var_name=name_scope) except Exception as e: # pylint: disable=broad-except self.fail("Exception raised: %s" % str(e)) return output output = _run_convolution() output_shape = output.shape.as_list() out_channels = in_channels if out_channels is None else out_channels self.assertEqual(output_shape[-1], out_channels) self.assertAllEqual(output_shape[:-1], data.shape[:-1]) def _get_var_shape(var_name): """Get the shape of a variable by name.""" if tf.executing_eagerly(): trainable_variables = layer.trainable_variables for tv in trainable_variables: if tv.name == name_scope + "/" + var_name + ":0": return tv.shape.as_list() raise ValueError("Variable not found.") else: with tf.compat.v1.variable_scope(name_scope, reuse=True): variable = tf.compat.v1.get_variable( var_name, initializer=tf.constant(0)) return 
variable.shape.as_list() self.assertAllEqual(_get_var_shape("u"), [in_channels, num_weight_matrices]) self.assertAllEqual(_get_var_shape("c"), [num_weight_matrices]) self.assertAllEqual(_get_var_shape("b"), [out_channels]) self.assertAllEqual( _get_var_shape("w"), [num_weight_matrices, in_channels, out_channels]) if not translation_invariant: self.assertAllEqual( _get_var_shape("v"), [in_channels, num_weight_matrices]) def test_feature_steered_convolution_layer_initializer(self): """Tests a custom variable initializer.""" data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0))) neighbors_indices = np.array(((0, 0), (0, 1), (0, 3), (1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (2, 3), (3, 0), (3, 2), (3, 3))) neighbors = tf.SparseTensor( neighbors_indices, np.ones(shape=(12,)) / 3.0, dense_shape=(4, 4)) initializer = tf.compat.v1.keras.initializers.zeros() if tf.executing_eagerly(): layer = gc_layer.FeatureSteeredConvolutionKerasLayer( translation_invariant=False, initializer=initializer) output = layer(inputs=[data, neighbors], sizes=None) else: out = gc_layer.feature_steered_convolution_layer( data=data, neighbors=neighbors, sizes=None, translation_invariant=False, initializer=initializer) self.evaluate(tf.compat.v1.global_variables_initializer()) output = self.evaluate(out) # All zeros initializer should result in all zeros output. self.assertAllEqual(output, np.zeros_like(data)) def test_feature_steered_convolution_layer_training(self): """Test a simple training loop.""" # Generate a small valid input for a simple training task. # Four corners of a square. data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0))) neighbors_indices = np.array(((0, 0), (0, 1), (0, 3), (1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (2, 3), (3, 0), (3, 2), (3, 3))) neighbors = tf.SparseTensor( neighbors_indices, np.ones(shape=(12,)) / 3.0, dense_shape=(4, 4)) # Desired output is arbitrary. 
labels = np.reshape([-1.0, -0.5, 0.5, 1.0], (-1, 1)) num_training_iterations = 5 if tf.executing_eagerly(): with tf.GradientTape(persistent=True) as tape: layer = gc_layer.FeatureSteeredConvolutionKerasLayer( translation_invariant=False, num_weight_matrices=1, num_output_channels=1) output = layer(inputs=[data, neighbors], sizes=None) loss = tf.nn.l2_loss(output - labels) trainable_variables = layer.trainable_variables for _ in range(num_training_iterations): grads = tape.gradient(loss, trainable_variables) tf.compat.v1.train.GradientDescentOptimizer(1e-4).apply_gradients( zip(grads, trainable_variables)) else: output = gc_layer.feature_steered_convolution_layer( data=data, neighbors=neighbors, sizes=None, translation_invariant=False, num_weight_matrices=1, num_output_channels=1) train_op = tf.compat.v1.train.GradientDescentOptimizer(1e-4).minimize( tf.nn.l2_loss(output - labels)) with tf.compat.v1.Session() as sess: sess.run(tf.compat.v1.initialize_all_variables()) for _ in range(num_training_iterations): sess.run(train_op) class GraphConvolutionTestDynamicGraphConvolutionKerasLayerTests( test_case.TestCase): @parameterized.parameters( (1, 1, 1, 1, "weighted"), (4, 2, 3, 12, "max"), (1, 2, 3, 4, "max"), ) def test_dynamic_graph_convolution_keras_layer_exception_not_raised_shapes( self, batch_size, num_vertices, in_channels, out_channels, reduction): """Check if the convolution parameters and output have correct shapes.""" if not tf.executing_eagerly(): return data, neighbors = _dummy_data(batch_size, num_vertices, in_channels) layer = gc_layer.DynamicGraphConvolutionKerasLayer( num_output_channels=out_channels, reduction=reduction) try: output = layer(inputs=[data, neighbors], sizes=None) except Exception as e: # pylint: disable=broad-except self.fail("Exception raised: %s" % str(e)) self.assertAllEqual((batch_size, num_vertices, out_channels), output.shape) @parameterized.parameters( (1, 1, 1, 1, "weighted"), (4, 2, 3, 12, "max"), (1, 2, 3, 4, "max"), ) def test_dynamic_graph_convolution_keras_layer_zero_kernel( self, batch_size, num_vertices, in_channels, out_channels, reduction): """Tests convolution with an all-zeros kernel.""" if not tf.executing_eagerly(): return data, neighbors = _dummy_data(batch_size, num_vertices, in_channels) data = np.random.uniform(size=data.shape).astype(np.float32) layer = gc_layer.DynamicGraphConvolutionKerasLayer( num_output_channels=out_channels, reduction=reduction, use_bias=False, kernel_initializer=tf.compat.v1.keras.initializers.zeros()) output = layer(inputs=[data, neighbors], sizes=None) self.assertAllEqual( output, np.zeros(shape=(batch_size, num_vertices, out_channels), dtype=np.float32)) @parameterized.parameters((1, 1, 1), (2, 3, 12), (2, 3, 4)) def test_dynamic_graph_convolution_keras_layer_duplicate_features( self, num_vertices, in_channels, out_channels): """Tests convolution when all vertex features are identical.""" if not tf.executing_eagerly(): return data = np.random.uniform(size=(1, in_channels)) data = np.tile(data, (num_vertices, 1)) # Results should be independent of 'neighbors'. 
neighbors = np.maximum(np.random.randint( 0, 2, size=(num_vertices, num_vertices)), np.eye(num_vertices)) neighbors = _dense_to_sparse(neighbors) layer = gc_layer.DynamicGraphConvolutionKerasLayer( num_output_channels=out_channels, reduction="max") output = layer(inputs=[data, neighbors], sizes=None) output_tile = tf.tile(output[:1, :], (num_vertices, 1)) self.assertAllEqual(output, output_tile) @parameterized.parameters("weighted", "max") def test_dynamic_graph_convolution_keras_layer_training(self, reduction): """Test a simple training loop.""" if not tf.executing_eagerly(): return # Generate a small valid input for a simple training task. # Four corners of a square. data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0))) neighbors_indices = np.array(((0, 0), (0, 1), (0, 3), (1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (2, 3), (3, 0), (3, 2), (3, 3))) neighbors = tf.SparseTensor( neighbors_indices, np.ones(shape=(12,)) / 3.0, dense_shape=(4, 4)) # Desired output is arbitrary. labels = np.reshape([-1.0, -0.5, 0.5, 1.0], (-1, 1)) num_training_iterations = 5 with tf.GradientTape(persistent=True) as tape: layer = gc_layer.DynamicGraphConvolutionKerasLayer( num_output_channels=2, reduction=reduction) output = layer(inputs=[data, neighbors], sizes=None) loss = tf.nn.l2_loss(output - labels) trainable_variables = layer.trainable_variables for _ in range(num_training_iterations): grads = tape.gradient(loss, trainable_variables) tf.compat.v1.train.GradientDescentOptimizer(1e-4).apply_gradients( zip(grads, trainable_variables)) if __name__ == "__main__": test_case.main()
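# --- Sketch (hedged, not part of the test file): what _dense_to_sparse above
# produces for a tiny matrix, round-tripped back to dense form.
import numpy as np
import tensorflow as tf

dense = np.eye(2, dtype=np.float32)
indices = np.where(dense)                       # row/col indices of non-zeros
sparse = tf.SparseTensor(
    np.stack(indices, axis=-1), dense[indices], dense_shape=dense.shape)
print(tf.sparse.to_dense(sparse).numpy())       # identical to `dense`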
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the graph convolution layers.""" from absl.testing import parameterized import numpy as np import tensorflow as tf import tensorflow_graphics.nn.layer.graph_convolution as gc_layer from tensorflow_graphics.util import test_case def _dense_to_sparse(data): """Convert a numpy array to a tf.SparseTensor.""" indices = np.where(data) return tf.SparseTensor( np.stack(indices, axis=-1), data[indices], dense_shape=data.shape) def _dummy_data(batch_size, num_vertices, num_channels): """Create inputs for feature_steered_convolution.""" if batch_size > 0: data = np.zeros( shape=(batch_size, num_vertices, num_channels), dtype=np.float32) neighbors = _dense_to_sparse( np.tile(np.eye(num_vertices, dtype=np.float32), (batch_size, 1, 1))) else: data = np.zeros(shape=(num_vertices, num_channels), dtype=np.float32) neighbors = _dense_to_sparse(np.eye(num_vertices, dtype=np.float32)) return data, neighbors class GraphConvolutionTestFeatureSteeredConvolutionLayerTests( test_case.TestCase): @parameterized.parameters( (1, 1, 1, 1, 1, False), (4, 2, 3, None, 5, False), (1, 2, 3, 4, 5, True), ) def test_feature_steered_convolution_layer_exception_not_raised_shapes( self, batch_size, num_vertices, in_channels, out_channels, num_weight_matrices, translation_invariant): """Check if the convolution parameters and output have correct shapes.""" data, neighbors = _dummy_data(batch_size, num_vertices, in_channels) name_scope = "test" if tf.executing_eagerly(): layer = gc_layer.FeatureSteeredConvolutionKerasLayer( translation_invariant=translation_invariant, num_weight_matrices=num_weight_matrices, num_output_channels=out_channels, name=name_scope) def _run_convolution(): """Run the appropriate feature steered convolution layer.""" if tf.executing_eagerly(): try: output = layer(inputs=[data, neighbors], sizes=None) except Exception as e: # pylint: disable=broad-except self.fail("Exception raised: %s" % str(e)) else: try: output = gc_layer.feature_steered_convolution_layer( data=data, neighbors=neighbors, sizes=None, translation_invariant=translation_invariant, num_weight_matrices=num_weight_matrices, num_output_channels=out_channels, var_name=name_scope) except Exception as e: # pylint: disable=broad-except self.fail("Exception raised: %s" % str(e)) return output output = _run_convolution() output_shape = output.shape.as_list() out_channels = in_channels if out_channels is None else out_channels self.assertEqual(output_shape[-1], out_channels) self.assertAllEqual(output_shape[:-1], data.shape[:-1]) def _get_var_shape(var_name): """Get the shape of a variable by name.""" if tf.executing_eagerly(): trainable_variables = layer.trainable_variables for tv in trainable_variables: if tv.name == name_scope + "/" + var_name + ":0": return tv.shape.as_list() raise ValueError("Variable not found.") else: with tf.compat.v1.variable_scope(name_scope, reuse=True): variable = tf.compat.v1.get_variable( var_name, initializer=tf.constant(0)) return 
variable.shape.as_list() self.assertAllEqual(_get_var_shape("u"), [in_channels, num_weight_matrices]) self.assertAllEqual(_get_var_shape("c"), [num_weight_matrices]) self.assertAllEqual(_get_var_shape("b"), [out_channels]) self.assertAllEqual( _get_var_shape("w"), [num_weight_matrices, in_channels, out_channels]) if not translation_invariant: self.assertAllEqual( _get_var_shape("v"), [in_channels, num_weight_matrices]) def test_feature_steered_convolution_layer_initializer(self): """Tests a custom variable initializer.""" data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0))) neighbors_indices = np.array(((0, 0), (0, 1), (0, 3), (1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (2, 3), (3, 0), (3, 2), (3, 3))) neighbors = tf.SparseTensor( neighbors_indices, np.ones(shape=(12,)) / 3.0, dense_shape=(4, 4)) initializer = tf.compat.v1.keras.initializers.zeros() if tf.executing_eagerly(): layer = gc_layer.FeatureSteeredConvolutionKerasLayer( translation_invariant=False, initializer=initializer) output = layer(inputs=[data, neighbors], sizes=None) else: out = gc_layer.feature_steered_convolution_layer( data=data, neighbors=neighbors, sizes=None, translation_invariant=False, initializer=initializer) self.evaluate(tf.compat.v1.global_variables_initializer()) output = self.evaluate(out) # All zeros initializer should result in all zeros output. self.assertAllEqual(output, np.zeros_like(data)) def test_feature_steered_convolution_layer_training(self): """Test a simple training loop.""" # Generate a small valid input for a simple training task. # Four corners of a square. data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0))) neighbors_indices = np.array(((0, 0), (0, 1), (0, 3), (1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (2, 3), (3, 0), (3, 2), (3, 3))) neighbors = tf.SparseTensor( neighbors_indices, np.ones(shape=(12,)) / 3.0, dense_shape=(4, 4)) # Desired output is arbitrary. 
labels = np.reshape([-1.0, -0.5, 0.5, 1.0], (-1, 1)) num_training_iterations = 5 if tf.executing_eagerly(): with tf.GradientTape(persistent=True) as tape: layer = gc_layer.FeatureSteeredConvolutionKerasLayer( translation_invariant=False, num_weight_matrices=1, num_output_channels=1) output = layer(inputs=[data, neighbors], sizes=None) loss = tf.nn.l2_loss(output - labels) trainable_variables = layer.trainable_variables for _ in range(num_training_iterations): grads = tape.gradient(loss, trainable_variables) tf.compat.v1.train.GradientDescentOptimizer(1e-4).apply_gradients( zip(grads, trainable_variables)) else: output = gc_layer.feature_steered_convolution_layer( data=data, neighbors=neighbors, sizes=None, translation_invariant=False, num_weight_matrices=1, num_output_channels=1) train_op = tf.compat.v1.train.GradientDescentOptimizer(1e-4).minimize( tf.nn.l2_loss(output - labels)) with tf.compat.v1.Session() as sess: sess.run(tf.compat.v1.initialize_all_variables()) for _ in range(num_training_iterations): sess.run(train_op) class GraphConvolutionTestDynamicGraphConvolutionKerasLayerTests( test_case.TestCase): @parameterized.parameters( (1, 1, 1, 1, "weighted"), (4, 2, 3, 12, "max"), (1, 2, 3, 4, "max"), ) def test_dynamic_graph_convolution_keras_layer_exception_not_raised_shapes( self, batch_size, num_vertices, in_channels, out_channels, reduction): """Check if the convolution parameters and output have correct shapes.""" if not tf.executing_eagerly(): return data, neighbors = _dummy_data(batch_size, num_vertices, in_channels) layer = gc_layer.DynamicGraphConvolutionKerasLayer( num_output_channels=out_channels, reduction=reduction) try: output = layer(inputs=[data, neighbors], sizes=None) except Exception as e: # pylint: disable=broad-except self.fail("Exception raised: %s" % str(e)) self.assertAllEqual((batch_size, num_vertices, out_channels), output.shape) @parameterized.parameters( (1, 1, 1, 1, "weighted"), (4, 2, 3, 12, "max"), (1, 2, 3, 4, "max"), ) def test_dynamic_graph_convolution_keras_layer_zero_kernel( self, batch_size, num_vertices, in_channels, out_channels, reduction): """Tests convolution with an all-zeros kernel.""" if not tf.executing_eagerly(): return data, neighbors = _dummy_data(batch_size, num_vertices, in_channels) data = np.random.uniform(size=data.shape).astype(np.float32) layer = gc_layer.DynamicGraphConvolutionKerasLayer( num_output_channels=out_channels, reduction=reduction, use_bias=False, kernel_initializer=tf.compat.v1.keras.initializers.zeros()) output = layer(inputs=[data, neighbors], sizes=None) self.assertAllEqual( output, np.zeros(shape=(batch_size, num_vertices, out_channels), dtype=np.float32)) @parameterized.parameters((1, 1, 1), (2, 3, 12), (2, 3, 4)) def test_dynamic_graph_convolution_keras_layer_duplicate_features( self, num_vertices, in_channels, out_channels): """Tests convolution when all vertex features are identical.""" if not tf.executing_eagerly(): return data = np.random.uniform(size=(1, in_channels)) data = np.tile(data, (num_vertices, 1)) # Results should be independent of 'neighbors'. 
neighbors = np.maximum(np.random.randint( 0, 2, size=(num_vertices, num_vertices)), np.eye(num_vertices)) neighbors = _dense_to_sparse(neighbors) layer = gc_layer.DynamicGraphConvolutionKerasLayer( num_output_channels=out_channels, reduction="max") output = layer(inputs=[data, neighbors], sizes=None) output_tile = tf.tile(output[:1, :], (num_vertices, 1)) self.assertAllEqual(output, output_tile) @parameterized.parameters("weighted", "max") def test_dynamic_graph_convolution_keras_layer_training(self, reduction): """Test a simple training loop.""" if not tf.executing_eagerly(): return # Generate a small valid input for a simple training task. # Four corners of a square. data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0))) neighbors_indices = np.array(((0, 0), (0, 1), (0, 3), (1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (2, 3), (3, 0), (3, 2), (3, 3))) neighbors = tf.SparseTensor( neighbors_indices, np.ones(shape=(12,)) / 3.0, dense_shape=(4, 4)) # Desired output is arbitrary. labels = np.reshape([-1.0, -0.5, 0.5, 1.0], (-1, 1)) num_training_iterations = 5 with tf.GradientTape(persistent=True) as tape: layer = gc_layer.DynamicGraphConvolutionKerasLayer( num_output_channels=2, reduction=reduction) output = layer(inputs=[data, neighbors], sizes=None) loss = tf.nn.l2_loss(output - labels) trainable_variables = layer.trainable_variables for _ in range(num_training_iterations): grads = tape.gradient(loss, trainable_variables) tf.compat.v1.train.GradientDescentOptimizer(1e-4).apply_gradients( zip(grads, trainable_variables)) if __name__ == "__main__": test_case.main()
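# --- Sketch (hedged, not part of the test file; assumes eager execution as in
# the tests): a single forward pass of the Keras layer on the four-vertex toy
# graph used above, with self-loop-only neighbors.
import numpy as np
import tensorflow as tf
import tensorflow_graphics.nn.layer.graph_convolution as gc_layer

data = np.array(((1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0)),
                dtype=np.float32)   # [V=4, C=2]
neighbors = tf.sparse.eye(4)        # identity adjacency: each vertex alone
layer = gc_layer.FeatureSteeredConvolutionKerasLayer(
    translation_invariant=False, num_weight_matrices=1, num_output_channels=1)
output = layer(inputs=[data, neighbors], sizes=None)  # shape [4, 1]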
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/math/interpolation/tests/slerp_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for slerp.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.math.interpolation import slerp from tensorflow_graphics.util import test_case _SQRT2_DIV2 = np.sqrt(2.0).astype(np.float32) * 0.5 class SlerpTest(test_case.TestCase): def _pick_random_quaternion(self): """Creates a random quaternion with random shape.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() return np.random.normal(size=tensor_shape + [4]) def _quaternion_slerp_helper(self, q1, q2, p): """Calls interpolate function for quaternions.""" return slerp.interpolate(q1, q2, p, slerp.InterpolationType.QUATERNION) def _vector_slerp_helper(self, q1, q2, p): """Calls interpolate function for vectors.""" return slerp.interpolate(q1, q2, p, slerp.InterpolationType.VECTOR) def test_interpolate_raises_exceptions(self): """Tests if unknown methods raise exceptions.""" vector1 = self._pick_random_quaternion() self.assert_exception_is_raised( slerp.interpolate, error_msg="Unknown interpolation type supplied.", shapes=[], vector1=vector1, vector2=-vector1, percent=0.1, method=2) def test_interpolate_with_weights_quaternion_preset(self): """Compares interpolate to quaternion_weights + interpolate_with_weights.""" q1 = self._pick_random_quaternion() q2 = q1 + tf.ones_like(q1) q1 = tf.nn.l2_normalize(q1, axis=-1) q2 = tf.nn.l2_normalize(q2, axis=-1) weight1, weight2 = slerp.quaternion_weights(q1, q2, 0.25) qf = slerp.interpolate_with_weights(q1, q2, weight1, weight2) qi = slerp.interpolate( q1, q2, 0.25, method=slerp.InterpolationType.QUATERNION) self.assertAllClose(qf, qi, atol=1e-9) def test_interpolate_with_weights_vector_preset(self): """Compares interpolate to vector_weights + interpolate_with_weights.""" # Any quaternion is a valid vector q1 = self._pick_random_quaternion() q2 = q1 + tf.ones_like(q1) weight1, weight2 = slerp.vector_weights(q1, q2, 0.75) qf = slerp.interpolate_with_weights(q1, q2, weight1, weight2) qi = slerp.interpolate(q1, q2, 0.75, method=slerp.InterpolationType.VECTOR) self.assertAllClose(qf, qi, atol=1e-9) @parameterized.parameters( # Orthogonal, same hemisphere (((1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.5,)), ((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),)), (((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)), ((0.5, 0.5, 0.5, 0.5),)), # Same hemisphere (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)), ((0.408248290463863, 0.0, 0.816496580927726, 0.408248290463863),)), # Same quaternions (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.75,)), ((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),)), # Anti-polar - small percent (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.2,)), ((-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0),)), # Anti-polar - large percent (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), 
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.8,)), ((-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0),)), # Extrapolation - same hemisphere (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (-0.5,)), ((0.408248290463863, -0.408248290463863, 0.816496580927726, 0.0),)), # Extrapolation - opposite hemisphere (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (-_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (-0.5,)), ((-0.408248290463863, -0.408248290463863, -0.816496580927726, 0.0),)), ) def test_quaternion_slerp_preset(self, test_inputs, test_outputs): """Tests the accuracy of qslerp against numpy-quaternion values.""" test_inputs = [np.array(test_input).astype(np.float32) for test_input in test_inputs] self.assert_output_is_correct(self._quaternion_slerp_helper, test_inputs, test_outputs, tile=False) def test_unnormalized_quaternion_weights_exception_raised(self): """Tests if quaternion_weights raise exceptions for unnormalized input.""" q1 = self._pick_random_quaternion() q2 = tf.nn.l2_normalize(q1, axis=-1) p = tf.constant((0.5), dtype=q1.dtype) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(slerp.quaternion_weights(q1, q2, p)) @parameterized.parameters( ((4,), (4,), (1,)), ((None, 4), (None, 4), (None, 1)), ((None, 4), (None, 4), (None, 4)), ) def test_quaternion_weights_exception_not_raised(self, *shapes): """Tests that valid input shapes do not raise exceptions for qslerp.""" self.assert_exception_is_not_raised(slerp.quaternion_weights, shapes) @parameterized.parameters( ("must have exactly 4 dimensions in axis -1", (3,), (4,), (1,)), ("must have exactly 4 dimensions in axis -1", (4,), (3,), (1,)), ("Not all batch dimensions are broadcast-compatible.", (2, 4), (3, 4), (1,)), ("Not all batch dimensions are broadcast-compatible.", (1, 4), (3, 4), (2,)), ) def test_quaternion_weights_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised for qslerp.""" self.assert_exception_is_raised(slerp.quaternion_weights, error_msg, shapes) @parameterized.parameters( # Same quaternions (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.75,)), ( (0.25,), (0.75,), )), # Anti-polar - small percent (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.2,)), ( (-0.8,), (0.2,), )), # Anti-polar - large percent (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.8,)), ( (-0.2,), (0.8,), )), ) def test_quaternion_weights_preset(self, test_inputs, test_outputs): """Tests the accuracy of quaternion_weights for problem cases.""" test_inputs = [np.array(test_input).astype(np.float32) for test_input in test_inputs] self.assert_output_is_correct(slerp.quaternion_weights, test_inputs, test_outputs, tile=False) @parameterized.parameters( ((3,), (3,), (1,)), ((None, 4), (None, 4), (None, 1)), ) def test_vector_weights_exception_not_raised(self, *shapes): """Tests that valid inputs do not raise exceptions for vector_weights.""" self.assert_exception_is_not_raised(slerp.vector_weights, shapes) @parameterized.parameters( ("must have the same number of dimensions in axes", (None, 3), (None, 4), (1,)), ("must have the same number of dimensions in axes", (2, 3), (2, 4), (1,)), ("Not all batch dimensions are broadcast-compatible.", (2, 3), (3, 3), (1,)), ("Not all batch dimensions are broadcast-compatible.", (1, 3), (3, 3), (2,)), ) def test_vector_weights_exception_raised(self, error_msg, *shapes): """Tests that shape exceptions are properly raised for vector_weights.""" 
self.assert_exception_is_raised(slerp.vector_weights, error_msg, shapes) @parameterized.parameters( # Orthogonal, same hemisphere (((1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.5,)), ((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),)), (((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)), ((0.5, 0.5, 0.5, 0.5),)), # Same hemisphere (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)), ((0.408248290463863, 0.0, 0.816496580927726, 0.408248290463863),)), # Same vectors (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.75,)), ((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),)), # Anti-polar - equal weights (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.5,)), ((0.0, 0.0, 0.0, 0.0),)), # Anti-polar - small percent (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.25,)), ((0.5, 0.0, 0.5, 0.0),)), # Extrapolation - same hemisphere (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (-1.0,)), ((0.0, -_SQRT2_DIV2, _SQRT2_DIV2, 0.0),)), # Extrapolation - opposite hemisphere (((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (-_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (1.5,)), ((-_SQRT2_DIV2, -0.0, -_SQRT2_DIV2, 0.0),)), # Unnormalized vectors (((4.0, 0.0), (0.0, 1.0), (0.5,)), ((2.82842712, _SQRT2_DIV2),)), ) def test_vector_slerp_preset(self, test_inputs, test_outputs): """Tests the accuracy of vector slerp results.""" test_inputs = [np.array(test_input).astype(np.float32) for test_input in test_inputs] self.assert_output_is_correct(self._vector_slerp_helper, test_inputs, test_outputs, tile=False) def test_vector_weights_reduce_to_lerp_preset(self): """Tests if vector slerp reduces to lerp for identical vectors as input.""" q1 = tf.constant((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0)) q2 = tf.constant((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0)) p = tf.constant((0.75,), dtype=q1.dtype) w1, w2 = slerp.vector_weights(q1, q2, p) self.assertAllClose(w1, (0.25,), rtol=1e-6) self.assertAllClose(w2, (0.75,), rtol=1e-6) if __name__ == "__main__": test_case.main()
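The orthogonal-quaternion preset above can be verified by hand: for unit quaternions separated by angle theta and interpolation parameter t, the slerp weights are sin((1 - t) * theta) / sin(theta) and sin(t * theta) / sin(theta). A short sketch using the same public API the tests exercise:

import tensorflow as tf

from tensorflow_graphics.math.interpolation import slerp

q1 = tf.constant((1.0, 0.0, 0.0, 0.0))  # unit quaternion
q2 = tf.constant((0.0, 1.0, 0.0, 0.0))  # orthogonal unit quaternion
# Here theta = pi / 2 and t = 0.5, so both weights are sin(pi / 4) = sqrt(2) / 2
# and the result is (sqrt(2)/2, sqrt(2)/2, 0.0, 0.0), matching the first preset.
halfway = slerp.interpolate(
    q1, q2, 0.5, method=slerp.InterpolationType.QUATERNION)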
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/datasets/features/camera_feature_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for tensorflow_graphics.datasets.features.camera_feature.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf import tensorflow_datasets as tfds from tensorflow_graphics.datasets.features import camera_feature class CameraFeatureTest(tfds.testing.FeatureExpectationsTestCase): """Test Cases for Camera FeatureConnector.""" def __get_camera_params(self): pose = {'R': np.eye(3).astype(np.float32), 't': np.zeros(3).astype(np.float32)} f = 35. optical_center = (640 / 2, 480 / 2) return pose, f, optical_center def test_simple_camera(self): """Tests camera parameters with fixed focal length, no skew and no aspect ratio.""" expected_pose, expected_f, expected_center = self.__get_camera_params() expected_intrinsics = np.asarray([[expected_f, 0, expected_center[0]], [0, expected_f, expected_center[1]], [0, 0, 1]], dtype=np.float32) expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics} inputs = {'f': expected_f, 'optical_center': expected_center, 'pose': expected_pose} lookat_inputs = { 'f': expected_f, 'optical_center': expected_center, 'pose': { 'look_at': np.array([0, 0, -1], dtype=np.float32), 'up': np.array([0, 1, 0], dtype=np.float32), 'position': np.array([0, 0, 0], dtype=np.float32) } } raising_pose_entry = { 'f': expected_f, 'optical_center': expected_center, 'pose': np.eye(4) } raising_pose_inputs = { 'f': expected_f, 'optical_center': expected_center, 'pose': {'rot': np.eye(3), 'trans': np.zeros(3)} } raising_lookat_inputs = { 'f': expected_f, 'optical_center': expected_center, 'pose': { 'l': np.array([0, 0, -1], dtype=np.float32), 'up': np.array([0, 1, 0], dtype=np.float32), 'C': np.array([0, 0, 0], dtype=np.float32) } } self.assertFeature( feature=camera_feature.Camera(), shape={ 'pose': { 'R': (3, 3), 't': (3,) }, 'intrinsics': (3, 3) }, dtype={ 'pose': { 'R': tf.float32, 't': tf.float32 }, 'intrinsics': tf.float32 }, tests=[ tfds.testing.FeatureExpectationItem( value=inputs, expected=expected_camera, ), tfds.testing.FeatureExpectationItem( value=lookat_inputs, expected=expected_camera ), tfds.testing.FeatureExpectationItem( value=raising_pose_inputs, raise_cls=ValueError, raise_msg='Wrong keys for pose feature provided' ), tfds.testing.FeatureExpectationItem( value=raising_lookat_inputs, raise_cls=ValueError, raise_msg='Wrong keys for pose feature provided' ), tfds.testing.FeatureExpectationItem( value=raising_pose_entry, raise_cls=ValueError, raise_msg='Pose needs to be a dictionary' ), ], ) def test_camera_with_aspect_ratio_and_skew(self): """Tests camera parameters with fixed focal length, aspect_ratio and skew.""" expected_pose, expected_f, expected_center = self.__get_camera_params() expected_aspect_ratio = expected_center[0] / expected_center[1] expected_skew = 0.6 expected_intrinsics = np.asarray( [[expected_f, expected_skew, expected_center[0]], [0, 
expected_aspect_ratio * expected_f, expected_center[1]], [0, 0, 1]], dtype=np.float32) expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics} inputs = {'f': expected_f, 'optical_center': expected_center, 'skew': expected_skew, 'aspect_ratio': expected_aspect_ratio, 'pose': expected_pose} self.assertFeature( feature=camera_feature.Camera(), shape={ 'pose': { 'R': (3, 3), 't': (3,) }, 'intrinsics': (3, 3) }, dtype={ 'pose': { 'R': tf.float32, 't': tf.float32 }, 'intrinsics': tf.float32 }, tests=[ tfds.testing.FeatureExpectationItem( value=inputs, expected=expected_camera, ), ], ) def test_full_camera_calibration_matrix(self): """Tests camera parameters with different focal length per camera axis and skew.""" expected_pose, _, expected_optical_center = self.__get_camera_params() expected_skew = 0.6 expected_f = (35., 40.) expected_intrinsics = np.array( [[expected_f[0], expected_skew, expected_optical_center[0]], [0, expected_f[1], expected_optical_center[1]], [0, 0, 1]], dtype=np.float32) expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics} inputs = {'f': expected_f, 'optical_center': expected_optical_center, 'skew': expected_skew, 'pose': expected_pose} raising_inputs = {'f': expected_f, 'aspect_ratio': 1.5, 'optical_center': expected_optical_center, 'skew': expected_skew, 'pose': expected_pose} self.assertFeature( feature=camera_feature.Camera(), shape={ 'pose': { 'R': (3, 3), 't': (3,) }, 'intrinsics': (3, 3) }, dtype={ 'pose': { 'R': tf.float32, 't': tf.float32 }, 'intrinsics': tf.float32 }, tests=[ tfds.testing.FeatureExpectationItem( value=inputs, expected=expected_camera, ), tfds.testing.FeatureExpectationItem( value=raising_inputs, raise_cls=ValueError, raise_msg='If aspect ratio is provided, f needs to ' 'be a single float', ), ], ) if __name__ == '__main__': tfds.testing.test_main()
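The expected intrinsics in these tests all follow the standard pinhole calibration matrix K = [[f, skew, cx], [0, aspect_ratio * f, cy], [0, 0, 1]]. A small hypothetical helper (not part of the feature API) that reproduces the expectations:

import numpy as np

def make_intrinsics(f, optical_center, skew=0.0, aspect_ratio=1.0):
  """Builds a 3x3 pinhole calibration matrix (illustrative helper)."""
  cx, cy = optical_center
  return np.array([[f, skew, cx],
                   [0.0, aspect_ratio * f, cy],
                   [0.0, 0.0, 1.0]], dtype=np.float32)

# Reproduces 'expected_intrinsics' from test_camera_with_aspect_ratio_and_skew.
k = make_intrinsics(35.0, (320.0, 240.0), skew=0.6, aspect_ratio=320.0 / 240.0)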
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/io/triangle_mesh.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """A thin wrapper around the trimesh library for loading triangle meshes.""" import os import tensorflow as tf import trimesh from trimesh import Scene from trimesh import Trimesh # TODO(b/156115314): Revisit the library for loading the triangle meshes. class GFileResolver(trimesh.visual.resolvers.Resolver): """A resolver using gfile for accessing other assets in the mesh directory.""" def __init__(self, path): if tf.io.gfile.isdir(path): self.directory = path elif tf.io.gfile.exists(path): self.directory = os.path.dirname(path) else: raise ValueError('path is not a file or directory') def get(self, name): with tf.io.gfile.GFile(os.path.join(self.directory, name), 'rb') as f: data = f.read() return data def load(file_obj, file_type=None, **kwargs): """Loads a triangle mesh from the given GFile/file path. Args: file_obj: A tf.io.gfile.GFile object or a string specifying the mesh file path. file_type: A string specifying the type of the file (e.g. 'obj', 'stl'). If not specified the file_type will be inferred from the file name. **kwargs: Additional arguments that should be passed to trimesh.load(). Returns: A trimesh.Trimesh or trimesh.Scene. """ if isinstance(file_obj, str): with tf.io.gfile.GFile(file_obj, 'r') as f: if file_type is None: file_type = trimesh.util.split_extension(file_obj) return trimesh.load( file_obj=f, file_type=file_type, resolver=GFileResolver(file_obj), **kwargs) if trimesh.util.is_file(file_obj): if not hasattr(file_obj, 'name') or not file_obj.name: raise ValueError( 'file_obj must have attribute "name". Try passing the file name instead.' ) if file_type is None: file_type = trimesh.util.split_extension(file_obj.name) return trimesh.load( file_obj=file_obj, file_type=file_type, resolver=GFileResolver(file_obj.name), **kwargs) raise ValueError('file_obj should be either a file object or a string') __all__ = ['load', 'Trimesh', 'Scene']
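A brief usage sketch of the wrapper defined above (the path is illustrative; load() returns either a Trimesh or, for multi-mesh files, a Scene):

from tensorflow_graphics.io import triangle_mesh

# file_type is inferred from the '.obj' extension.
mesh = triangle_mesh.load('/tmp/bunny.obj')
if isinstance(mesh, triangle_mesh.Trimesh):
  print(mesh.vertices.shape, mesh.faces.shape)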
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/datasets/shapenet/shapenet.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Shapenet Core dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import json import os import textwrap import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds from tensorflow_datasets import features as tfds_features from tensorflow_graphics.datasets import features as tfg_features _CITATION = """ @techreport{shapenet2015, title = {{ShapeNet: An Information-Rich 3D Model Repository}}, author = {Chang, Angel X. and Funkhouser, Thomas and Guibas, Leonidas and Hanrahan, Pat and Huang, Qixing and Li, Zimo and Savarese, Silvio and Savva, Manolis and Song, Shuran and Su, Hao and Xiao, Jianxiong and Yi, Li and Yu, Fisher}, number = {arXiv:1512.03012 [cs.GR]}, institution = {Stanford University --- Princeton University --- Toyota Technological Institute at Chicago}, year = {2015} } """ _DESCRIPTION = """ ShapeNetCore is a densely annotated subset of ShapeNet covering 55 common object categories with ~51,300 unique 3D models. Each model in ShapeNetCore is linked to an appropriate synset in WordNet (version 3.0). The synsets will be extracted from the taxonomy.json file in the ShapeNetCore.v2.zip archive and the splits from http://shapenet.cs.stanford.edu/shapenet/obj-zip/SHREC16/all.csv """ _TAXONOMY_FILE_NAME = 'taxonomy.json' _SPLIT_FILE_URL = \ 'http://shapenet.cs.stanford.edu/shapenet/obj-zip/SHREC16/all.csv' class ShapenetConfig(tfds.core.BuilderConfig): """Base class for Shapenet BuilderConfigs. The Shapenet database builder delegates the implementation of info, split_generators and generate_examples to the specified ShapenetConfig. This is done to allow multiple versions of the dataset. 
""" def info(self, dataset_builder): """Delegated Shapenet._info.""" raise NotImplementedError('Abstract method') def split_generators(self, dl_manager, dataset_builder): """Delegated Shapenet._split_generators.""" raise NotImplementedError('Abstract method') def generate_examples(self, **kwargs): """Delegated Shapenet._generate_examples.""" raise NotImplementedError('Abstract method') class MeshConfig(ShapenetConfig): """A Shapenet config for loading the original .obj files.""" _MODEL_SUBPATH = os.path.join('models', 'model_normalized.obj') def __init__(self, model_subpath=_MODEL_SUBPATH): super(MeshConfig, self).__init__( name='shapenet_trimesh', description=_DESCRIPTION, version=tfds.core.Version('1.0.0')) self.model_subpath = model_subpath def info(self, dataset_builder): return tfds.core.DatasetInfo( builder=dataset_builder, description=_DESCRIPTION, features=tfds_features.FeaturesDict({ 'trimesh': tfg_features.TriangleMesh(), 'label': tfds_features.ClassLabel(num_classes=353), 'model_id': tfds_features.Text(), }), supervised_keys=('trimesh', 'label'), # Homepage of the dataset for documentation homepage='https://shapenet.org/', citation=_CITATION, ) def split_generators(self, dl_manager, dataset_builder): # Extract the synset ids from the taxonomy file and update the ClassLabel # feature. with tf.io.gfile.GFile( os.path.join(dl_manager.manual_dir, _TAXONOMY_FILE_NAME)) as taxonomy_file: labels = [x['synsetId'] for x in json.loads(taxonomy_file.read())] # Remove duplicate labels (the json file contains two identical entries # for synset '04591713'). labels = list(collections.OrderedDict.fromkeys(labels)) dataset_builder.info.features['label'].names = labels split_file = dl_manager.download(_SPLIT_FILE_URL) fieldnames = ['id', 'synset', 'sub_synset', 'model_id', 'split'] model_items = collections.defaultdict(list) with tf.io.gfile.GFile(split_file) as csvfile: for row in csv.DictReader(csvfile, fieldnames): model_items[row['split']].append(row) return [ tfds.core.SplitGenerator( name=tfds.Split.TRAIN, gen_kwargs={ 'base_dir': dl_manager.manual_dir, 'models': model_items['train'] }, ), tfds.core.SplitGenerator( name=tfds.Split.TEST, gen_kwargs={ 'base_dir': dl_manager.manual_dir, 'models': model_items['test'] }, ), tfds.core.SplitGenerator( name=tfds.Split.VALIDATION, gen_kwargs={ 'base_dir': dl_manager.manual_dir, 'models': model_items['val'] }, ), ] def generate_examples(self, base_dir, models): """Yields examples. The structure of the examples: { 'trimesh': tensorflow_graphics.datasets.features.TriangleMesh 'label': tensorflow_datasets.features.ClassLabel 'model_id': tensorflow_datasets.features.Text } Args: base_dir: The base directory of shapenet. models: The list of models in the split. """ for model in models: synset = model['synset'] model_id = model['model_id'] model_filepath = os.path.join(base_dir, synset, model_id, self.model_subpath) # If the model doesn't exist, skip it. if not tf.io.gfile.exists(model_filepath): continue yield model_id, { 'trimesh': model_filepath, 'label': synset, 'model_id': model_id, } class Shapenet(tfds.core.GeneratorBasedBuilder): """ShapeNetCore V2. 
Example usage of the dataset: import tensorflow_datasets as tfds from tensorflow_graphics.datasets.shapenet import Shapenet data_set = Shapenet.load( split='train', download_and_prepare_kwargs={ 'download_config': tfds.download.DownloadConfig(manual_dir='~/shapenet_base') }) for example in data_set.take(1): trimesh, label, model_id = example['trimesh'], example['label'], example['model_id'] """ BUILDER_CONFIGS = [MeshConfig()] VERSION = tfds.core.Version('1.0.0') @staticmethod def load(*args, **kwargs): return tfds.load('shapenet', *args, **kwargs) # pytype: disable=wrong-arg-count MANUAL_DOWNLOAD_INSTRUCTIONS = textwrap.dedent("""\ manual_dir should contain the extracted ShapeNetCore.v2.zip archive. You need to register on https://shapenet.org/download/shapenetcore in order to get the link to download the dataset. """) def _info(self): return self.builder_config.info(self) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" return self.builder_config.split_generators(dl_manager, self) def _generate_examples(self, **kwargs): """Yields examples.""" return self.builder_config.generate_examples(**kwargs)
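The split bookkeeping in MeshConfig.split_generators reduces to grouping the rows of all.csv by their 'split' column. A self-contained sketch of that step, with an in-memory stand-in for the downloaded CSV (the rows and ids below are made up):

import collections
import csv
import io

fieldnames = ['id', 'synset', 'sub_synset', 'model_id', 'split']
split_csv = io.StringIO('1,02691156,02690373,aaa,train\n'
                        '2,02691156,02690373,bbb,val\n')

model_items = collections.defaultdict(list)
for row in csv.DictReader(split_csv, fieldnames):
  model_items[row['split']].append(row)

assert [m['model_id'] for m in model_items['train']] == ['aaa']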
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/rendering/light/point_light.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements the rendering equation for a point light.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import tensorflow as tf from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def estimate_radiance(point_light_radiance, point_light_position, surface_point_position, surface_point_normal, observation_point, brdf, name=None, reflected_light_fall_off=False): """Estimates the spectral radiance of a point light reflected from the surface point towards the observation point. Note: In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. B1 to Bm are optional batch dimensions for the lights, which must be broadcast compatible. Note: In case the light or the observation point are located behind the surface the function will return 0. Note: The gradient of this function is not smooth when the dot product of the normal with the light-to-surface or surface-to-observation vectors is 0. Args: point_light_radiance: A tensor of shape '[B1, ..., Bm, K]', where the last axis represents the radiance of the point light at a specific wave length. point_light_position: A tensor of shape `[B1, ..., Bm, 3]`, where the last axis represents the position of the point light. surface_point_position: A tensor of shape `[A1, ..., An, 3]`, where the last axis represents the position of the surface point. surface_point_normal: A tensor of shape `[A1, ..., An, 3]`, where the last axis represents the normalized surface normal at the given surface point. observation_point: A tensor of shape `[A1, ..., An, 3]`, where the last axis represents the observation point. brdf: The BRDF of the surface as a function of: incoming_light_direction - The incoming light direction as the last axis of a tensor with shape `[A1, ..., An, 3]`. outgoing_light_direction - The outgoing light direction as the last axis of a tensor with shape `[A1, ..., An, 3]`. surface_point_normal - The surface normal as the last axis of a tensor with shape `[A1, ..., An, 3]`. Note - The BRDF should return a tensor of size '[A1, ..., An, K]' where the last axis represents the amount of reflected light in each wave length. name: A name for this op. Defaults to "estimate_radiance". reflected_light_fall_off: A boolean specifying whether or not to include the fall off of the light reflected from the surface towards the observation point in the calculation. Defaults to False. Returns: A tensor of shape `[A1, ..., An, B1, ..., Bm, K]`, where the last axis represents the amount of light received at the observation point after being reflected from the given surface point. Raises: ValueError: if the shape of `point_light_position`, `surface_point_position`, `surface_point_normal`, or `observation_point` is not supported. 
InvalidArgumentError: if 'surface_point_normal' is not normalized. """ with tf.compat.v1.name_scope(name, "estimate_radiance", [ point_light_radiance, point_light_position, surface_point_position, surface_point_normal, observation_point, brdf ]): point_light_radiance = tf.convert_to_tensor(value=point_light_radiance) point_light_position = tf.convert_to_tensor(value=point_light_position) surface_point_position = tf.convert_to_tensor(value=surface_point_position) surface_point_normal = tf.convert_to_tensor(value=surface_point_normal) observation_point = tf.convert_to_tensor(value=observation_point) shape.check_static( tensor=point_light_position, tensor_name="point_light_position", has_dim_equals=(-1, 3)) shape.check_static( tensor=surface_point_position, tensor_name="surface_point_position", has_dim_equals=(-1, 3)) shape.check_static( tensor=surface_point_normal, tensor_name="surface_point_normal", has_dim_equals=(-1, 3)) shape.check_static( tensor=observation_point, tensor_name="observation_point", has_dim_equals=(-1, 3)) shape.compare_batch_dimensions( tensors=(surface_point_position, surface_point_normal, observation_point), tensor_names=("surface_point_position", "surface_point_normal", "observation_point"), last_axes=-2, broadcast_compatible=True) shape.compare_batch_dimensions( tensors=(point_light_radiance, point_light_position), tensor_names=("point_light_radiance", "point_light_position"), last_axes=-2, broadcast_compatible=True) surface_point_normal = asserts.assert_normalized(surface_point_normal) # Get the number of lights dimensions (B1,...,Bm). lights_num_dimensions = max( len(point_light_radiance.shape), len(point_light_position.shape)) - 1 # Reshape the other parameters so they can be broadcasted to the output of # shape [A1,...,An, B1,...,Bm, K]. surface_point_position = tf.reshape( surface_point_position, surface_point_position.shape[:-1] + (1,) * lights_num_dimensions + (3,)) surface_point_normal = tf.reshape( surface_point_normal, surface_point_normal.shape[:-1] + (1,) * lights_num_dimensions + (3,)) observation_point = tf.reshape( observation_point, observation_point.shape[:-1] + (1,) * lights_num_dimensions + (3,)) light_to_surface_point = surface_point_position - point_light_position distance_light_surface_point = tf.norm( tensor=light_to_surface_point, axis=-1, keepdims=True) incoming_light_direction = tf.math.l2_normalize( light_to_surface_point, axis=-1) surface_to_observation_point = observation_point - surface_point_position outgoing_light_direction = tf.math.l2_normalize( surface_to_observation_point, axis=-1) brdf_value = brdf(incoming_light_direction, outgoing_light_direction, surface_point_normal) incoming_light_dot_surface_normal = vector.dot(-incoming_light_direction, surface_point_normal) outgoing_light_dot_surface_normal = vector.dot(outgoing_light_direction, surface_point_normal) estimated_radiance = (point_light_radiance * \ brdf_value * incoming_light_dot_surface_normal) / \ (4. * math.pi * tf.math.square(distance_light_surface_point)) if reflected_light_fall_off: distance_surface_observation_point = tf.norm( tensor=surface_to_observation_point, axis=-1, keepdims=True) estimated_radiance = estimated_radiance / \ tf.math.square(distance_surface_observation_point) # Create a condition for checking whether the light or observation point are # behind the surface. 
min_dot = tf.minimum(incoming_light_dot_surface_normal, outgoing_light_dot_surface_normal) common_shape = shape.get_broadcasted_shape(min_dot.shape, estimated_radiance.shape) d_val = lambda dim: 1 if dim is None else tf.compat.v1.dimension_value(dim) common_shape = [d_val(dim) for dim in common_shape] condition = tf.broadcast_to(tf.greater_equal(min_dot, 0.0), common_shape) return tf.compat.v1.where(condition, estimated_radiance, tf.zeros_like(estimated_radiance)) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements the rendering equation for a point light.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import tensorflow as tf from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def estimate_radiance(point_light_radiance, point_light_position, surface_point_position, surface_point_normal, observation_point, brdf, name=None, reflected_light_fall_off=False): """Estimates the spectral radiance of a point light reflected from the surface point towards the observation point. Note: In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. B1 to Bm are optional batch dimensions for the lights, which must be broadcast compatible. Note: If the light or the observation point is located behind the surface, the function returns 0. Note: The gradient of this function is not smooth when the dot product of the normal with the light-to-surface or surface-to-observation vectors is 0. Args: point_light_radiance: A tensor of shape `[B1, ..., Bm, K]`, where the last axis represents the radiance of the point light at a specific wavelength. point_light_position: A tensor of shape `[B1, ..., Bm, 3]`, where the last axis represents the position of the point light. surface_point_position: A tensor of shape `[A1, ..., An, 3]`, where the last axis represents the position of the surface point. surface_point_normal: A tensor of shape `[A1, ..., An, 3]`, where the last axis represents the normalized surface normal at the given surface point. observation_point: A tensor of shape `[A1, ..., An, 3]`, where the last axis represents the observation point. brdf: The BRDF of the surface as a function of: incoming_light_direction - The incoming light direction as the last axis of a tensor with shape `[A1, ..., An, 3]`. outgoing_light_direction - The outgoing light direction as the last axis of a tensor with shape `[A1, ..., An, 3]`. surface_point_normal - The surface normal as the last axis of a tensor with shape `[A1, ..., An, 3]`. Note - The BRDF should return a tensor of size `[A1, ..., An, K]` where the last axis represents the amount of reflected light in each wavelength. name: A name for this op. Defaults to "estimate_radiance". reflected_light_fall_off: A boolean specifying whether or not to include the fall-off of the light reflected from the surface towards the observation point in the calculation. Defaults to False. Returns: A tensor of shape `[A1, ..., An, B1, ..., Bm, K]`, where the last axis represents the amount of light received at the observation point after being reflected from the given surface point. Raises: ValueError: if the shape of `point_light_position`, `surface_point_position`, `surface_point_normal`, or `observation_point` is not supported.
InvalidArgumentError: if 'surface_point_normal' is not normalized. """ with tf.compat.v1.name_scope(name, "estimate_radiance", [ point_light_radiance, point_light_position, surface_point_position, surface_point_normal, observation_point, brdf ]): point_light_radiance = tf.convert_to_tensor(value=point_light_radiance) point_light_position = tf.convert_to_tensor(value=point_light_position) surface_point_position = tf.convert_to_tensor(value=surface_point_position) surface_point_normal = tf.convert_to_tensor(value=surface_point_normal) observation_point = tf.convert_to_tensor(value=observation_point) shape.check_static( tensor=point_light_position, tensor_name="point_light_position", has_dim_equals=(-1, 3)) shape.check_static( tensor=surface_point_position, tensor_name="surface_point_position", has_dim_equals=(-1, 3)) shape.check_static( tensor=surface_point_normal, tensor_name="surface_point_normal", has_dim_equals=(-1, 3)) shape.check_static( tensor=observation_point, tensor_name="observation_point", has_dim_equals=(-1, 3)) shape.compare_batch_dimensions( tensors=(surface_point_position, surface_point_normal, observation_point), tensor_names=("surface_point_position", "surface_point_normal", "observation_point"), last_axes=-2, broadcast_compatible=True) shape.compare_batch_dimensions( tensors=(point_light_radiance, point_light_position), tensor_names=("point_light_radiance", "point_light_position"), last_axes=-2, broadcast_compatible=True) surface_point_normal = asserts.assert_normalized(surface_point_normal) # Get the number of lights dimensions (B1,...,Bm). lights_num_dimensions = max( len(point_light_radiance.shape), len(point_light_position.shape)) - 1 # Reshape the other parameters so they can be broadcasted to the output of # shape [A1,...,An, B1,...,Bm, K]. surface_point_position = tf.reshape( surface_point_position, surface_point_position.shape[:-1] + (1,) * lights_num_dimensions + (3,)) surface_point_normal = tf.reshape( surface_point_normal, surface_point_normal.shape[:-1] + (1,) * lights_num_dimensions + (3,)) observation_point = tf.reshape( observation_point, observation_point.shape[:-1] + (1,) * lights_num_dimensions + (3,)) light_to_surface_point = surface_point_position - point_light_position distance_light_surface_point = tf.norm( tensor=light_to_surface_point, axis=-1, keepdims=True) incoming_light_direction = tf.math.l2_normalize( light_to_surface_point, axis=-1) surface_to_observation_point = observation_point - surface_point_position outgoing_light_direction = tf.math.l2_normalize( surface_to_observation_point, axis=-1) brdf_value = brdf(incoming_light_direction, outgoing_light_direction, surface_point_normal) incoming_light_dot_surface_normal = vector.dot(-incoming_light_direction, surface_point_normal) outgoing_light_dot_surface_normal = vector.dot(outgoing_light_direction, surface_point_normal) estimated_radiance = (point_light_radiance * \ brdf_value * incoming_light_dot_surface_normal) / \ (4. * math.pi * tf.math.square(distance_light_surface_point)) if reflected_light_fall_off: distance_surface_observation_point = tf.norm( tensor=surface_to_observation_point, axis=-1, keepdims=True) estimated_radiance = estimated_radiance / \ tf.math.square(distance_surface_observation_point) # Create a condition for checking whether the light or observation point are # behind the surface. 
min_dot = tf.minimum(incoming_light_dot_surface_normal, outgoing_light_dot_surface_normal) common_shape = shape.get_broadcasted_shape(min_dot.shape, estimated_radiance.shape) d_val = lambda dim: 1 if dim is None else tf.compat.v1.dimension_value(dim) common_shape = [d_val(dim) for dim in common_shape] condition = tf.broadcast_to(tf.greater_equal(min_dot, 0.0), common_shape) return tf.compat.v1.where(condition, estimated_radiance, tf.zeros_like(estimated_radiance)) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
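A minimal usage sketch of `estimate_radiance` with a constant Lambertian BRDF. The import path and all numeric values below are illustrative assumptions, not taken from this file:

import math
import tensorflow as tf
# Assumed import path; point this at wherever estimate_radiance is exported.
from tensorflow_graphics.rendering.light import point_light

def lambertian_brdf(incoming_light_direction, outgoing_light_direction,
                    surface_point_normal):
  # A Lambertian BRDF is direction-independent: albedo / pi in each band.
  del incoming_light_direction, outgoing_light_direction  # Unused.
  albedo = 0.8  # Illustrative value.
  return tf.ones_like(surface_point_normal[..., :1]) * (albedo / math.pi)

radiance = point_light.estimate_radiance(
    point_light_radiance=(10.0,),          # K = 1 wavelength band.
    point_light_position=(0.0, 0.0, 2.0),  # Light 2 units above the surface.
    surface_point_position=(0.0, 0.0, 0.0),
    surface_point_normal=(0.0, 0.0, 1.0),
    observation_point=(1.0, 0.0, 1.0),
    brdf=lambertian_brdf)
# The result is positive here and becomes 0 if the light or the observer
# moves below the z=0 plane, matching the behind-the-surface check above.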
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/geometry/representation/mesh/tests/mesh_test_utils.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper routines for mesh unit tests. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np def create_single_triangle_mesh(): r"""Creates a single-triangle mesh, in the z=0 plane and facing +z. (0,1) 2 |\ | \ | \ (0,0) 0---1 (1,0) Returns: vertices: A [3, 3] float array faces: A [1, 3] int array """ vertices = np.array( ((0, 0, 0), (1, 0, 0), (0, 1, 0)), dtype=np.float32) faces = np.array(((0, 1, 2),), dtype=np.int32) return vertices, faces def create_square_triangle_mesh(): r"""Creates a square mesh, in the z=0 plane and facing +z. # (0,1) 2---3 (1,1) # |\ /| # | 4 | # |/ \| # (0,0) 0---1 (1,0) Returns: vertices: A [5, 3] float array faces: A [4, 3] int array """ vertices = np.array( ((0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0.5, 0.5, 0)), dtype=np.float32) faces = np.array( ((0, 1, 4), (1, 3, 4), (3, 2, 4), (2, 0, 4)), dtype=np.int32) return vertices, faces
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper routines for mesh unit tests. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np def create_single_triangle_mesh(): r"""Creates a single-triangle mesh, in the z=0 plane and facing +z. (0,1) 2 |\ | \ | \ (0,0) 0---1 (1,0) Returns: vertices: A [3, 3] float array faces: A [1, 3] int array """ vertices = np.array( ((0, 0, 0), (1, 0, 0), (0, 1, 0)), dtype=np.float32) faces = np.array(((0, 1, 2),), dtype=np.int32) return vertices, faces def create_square_triangle_mesh(): r"""Creates a square mesh, in the z=0 plane and facing +z. # (0,1) 2---3 (1,1) # |\ /| # | 4 | # |/ \| # (0,0) 0---1 (1,0) Returns: vertices: A [5, 3] float array faces: A [4, 3] int array """ vertices = np.array( ((0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0.5, 0.5, 0)), dtype=np.float32) faces = np.array( ((0, 1, 4), (1, 3, 4), (3, 2, 4), (2, 0, 4)), dtype=np.int32) return vertices, faces
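A short usage sketch of these helpers; it checks the winding convention the diagrams describe (counter-clockwise when seen from +z, so the geometric normals point along +z). The import path follows the filepath above:

import numpy as np
from tensorflow_graphics.geometry.representation.mesh.tests import mesh_test_utils

vertices, faces = mesh_test_utils.create_square_triangle_mesh()
assert vertices.shape == (5, 3) and faces.shape == (4, 3)
for face in faces:
  v0, v1, v2 = vertices[face]
  # Counter-clockwise winding viewed from +z gives a +z geometric normal.
  normal = np.cross(v1 - v0, v2 - v0)
  assert normal[2] > 0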
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/datasets/features/camera_feature.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Camera feature.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow_datasets import features from tensorflow_graphics.datasets.features import pose_feature class Camera(features.FeaturesDict): """`FeatureConnector` for camera calibration (extrinsic and intrinsic). During `_generate_examples`, the feature connector accepts as input: * `parameter_dict:` A dictionary containing the extrinsic and intrinsic parameters of the camera as: - 'pose': Dictionary containing * Either 3x3 rotation matrix and translation vector: { 'R': A `float32` tensor with shape `[3, 3]` denoting the 3D rotation matrix. 't': A `float32` tensor with shape `[3,]` denoting the translation vector. } OR * look_at, position and up-vector: { 'look_at': float32 vector of shape (3,). 'position': float32 vector of shape (3,). 'up': float32 vector of shape (3,). } - 'f': focal length of the camera in pixels (either a single float32 value or a tuple of float32 as (f_x, f_y)). - 'optical_center': Optical center of the camera in pixel coordinates as tuple (c_x, c_y) Optional parameters: - 'skew': float32 denoting the skew of the camera axes. - 'aspect_ratio': float32 denoting the aspect_ratio, if a single fixed focal length is provided. Output: A dictionary containing: * 'pose': A `tensorflow_graphics.datasets.features.Pose` FeatureConnector representing the 3D pose of the camera. * 'intrinsics': A `float32` tensor with shape `[3,3]` denoting the intrinsic matrix. Example: Default values for skew (s) and aspect_ratio (a) are 0 and 1, respectively. Full calibration matrix: K = [[ f_x, s, c_x ], [ 0, f_y, c_y ], [ 0, 0, 1 ]] With same focal length: K = [[ f, s, c_x ], [ 0, af, c_y ], [ 0, 0, 1 ]] """ def __init__(self): super(Camera, self).__init__({ 'pose': pose_feature.Pose(), 'intrinsics': features.Tensor(shape=(3, 3), dtype=tf.float32), }) def encode_example(self, example_dict): """Convert the given parameters into a dict convertible to tf example.""" REQUIRED_KEYS = ['pose', 'f', 'optical_center'] # pylint: disable=invalid-name if not all(key in example_dict for key in REQUIRED_KEYS): raise ValueError(f'Missing keys in provided dictionary! 
' f'Expected {REQUIRED_KEYS}, ' f'but {example_dict.keys()} were given.') if not isinstance(example_dict['pose'], dict): raise ValueError('Pose needs to be a dictionary containing either ' 'rotation and translation or look at, ' 'up vector and position.') features_dict = {} pose_dict = example_dict['pose'] if all(key in pose_dict for key in ['R', 't']): features_dict['pose'] = { 'R': pose_dict['R'], 't': pose_dict['t'] } elif all(key in pose_dict for key in ['look_at', 'position', 'up']): rotation = self._create_rotation_from_look_at(pose_dict['look_at'], pose_dict['position'], pose_dict['up']) translation = (-rotation) @ pose_dict['position'] features_dict['pose'] = { 'R': rotation, 't': translation } else: raise ValueError('Wrong keys for pose feature provided!') aspect_ratio = 1 skew = 0 if 'aspect_ratio' in example_dict.keys(): if not isinstance(example_dict['f'], float): raise ValueError('If aspect ratio is provided, ' 'f needs to be a single float.') aspect_ratio = example_dict['aspect_ratio'] if 'skew' in example_dict.keys(): skew = example_dict['skew'] features_dict['intrinsics'] = self._create_calibration_matrix( example_dict['f'], example_dict['optical_center'], aspect_ratio, skew ) return super(Camera, self).encode_example(features_dict) def _create_rotation_from_look_at(self, look_at, position, up): """Creates a rotation matrix according to the OpenGL gluLookAt convention. Args: look_at: A float32 3D vector of look_at direction. position: A float32 3D vector of camera position. up: A float32 3D up direction vector. Returns: A 3x3 float32 rotation matrix. (https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluLookAt.xml) """ dir_vec = look_at - position dir_vec /= np.linalg.norm(dir_vec) side_vec = np.cross(dir_vec, up) side_vec /= np.linalg.norm(side_vec) up_vec = np.cross(side_vec, dir_vec) matrix = np.array([side_vec, up_vec, -dir_vec]) return matrix def _create_calibration_matrix(self, f, optical_center, aspect_ratio=1, skew=0): """Constructs the 3x3 calibration matrix K. Args: f: Focal length of the camera. Either a single float32 value or a tuple of float32 when different focal lengths for each axis are provided (f_x, f_y). optical_center: Tuple (c_x, c_y) containing the optical center of the camera in pixel coordinates. aspect_ratio: Optional parameter, if a fixed focal length for both dimensions is used. Defaults to 1. skew: Optional parameter denoting the skew between the camera axes. Returns: float32 Tensor of shape [3,3] containing the upper triangular calibration matrix K. """ if not isinstance(optical_center, tuple): raise ValueError('Optical center of camera needs ' 'to be a tuple of (c_x, c_y).') if isinstance(f, tuple): f_x, f_y = f else: f_x = f f_y = aspect_ratio * f return np.asarray([[f_x, skew, optical_center[0]], [0, f_y, optical_center[1]], [0, 0, 1] ], dtype=np.float32) @classmethod def from_json_content(cls, value) -> 'Camera': return cls() def to_json_content(self): return {}
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Camera feature.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow_datasets import features from tensorflow_graphics.datasets.features import pose_feature class Camera(features.FeaturesDict): """`FeatureConnector` for camera calibration (extrinsic and intrinsic). During `_generate_examples`, the feature connector accepts as input: * `parameter_dict:` A dictionary containing the extrinsic and intrinsic parameters of the camera as: - 'pose': Dictionary containing * Either 3x3 rotation matrix and translation vector: { 'R': A `float32` tensor with shape `[3, 3]` denoting the 3D rotation matrix. 't': A `float32` tensor with shape `[3,]` denoting the translation vector. } OR * look_at, position and up-vector: { 'look_at': float32 vector of shape (3,). 'position': float32 vector of shape (3,). 'up': float32 vector of shape (3,). } - 'f': focal length of the camera in pixels (either a single float32 value or a tuple of float32 as (f_x, f_y)). - 'optical_center': Optical center of the camera in pixel coordinates as tuple (c_x, c_y) Optional parameters: - 'skew': float32 denoting the skew of the camera axes. - 'aspect_ratio': float32 denoting the aspect_ratio, if a single fixed focal length is provided. Output: A dictionary containing: * 'pose': A `tensorflow_graphics.datasets.features.Pose` FeatureConnector representing the 3D pose of the camera. * 'intrinsics': A `float32` tensor with shape `[3,3]` denoting the intrinsic matrix. Example: Default values for skew (s) and aspect_ratio (a) are 0 and 1, respectively. Full calibration matrix: K = [[ f_x, s, c_x ], [ 0, f_y, c_y ], [ 0, 0, 1 ]] With same focal length: K = [[ f, s, c_x ], [ 0, af, c_y ], [ 0, 0, 1 ]] """ def __init__(self): super(Camera, self).__init__({ 'pose': pose_feature.Pose(), 'intrinsics': features.Tensor(shape=(3, 3), dtype=tf.float32), }) def encode_example(self, example_dict): """Convert the given parameters into a dict convertible to tf example.""" REQUIRED_KEYS = ['pose', 'f', 'optical_center'] # pylint: disable=invalid-name if not all(key in example_dict for key in REQUIRED_KEYS): raise ValueError(f'Missing keys in provided dictionary! 
' f'Expected {REQUIRED_KEYS}, ' f'but {example_dict.keys()} were given.') if not isinstance(example_dict['pose'], dict): raise ValueError('Pose needs to be a dictionary containing either ' 'rotation and translation or look at, ' 'up vector and position.') features_dict = {} pose_dict = example_dict['pose'] if all(key in pose_dict for key in ['R', 't']): features_dict['pose'] = { 'R': pose_dict['R'], 't': pose_dict['t'] } elif all(key in pose_dict for key in ['look_at', 'position', 'up']): rotation = self._create_rotation_from_look_at(pose_dict['look_at'], pose_dict['position'], pose_dict['up']) translation = (-rotation) @ pose_dict['position'] features_dict['pose'] = { 'R': rotation, 't': translation } else: raise ValueError('Wrong keys for pose feature provided!') aspect_ratio = 1 skew = 0 if 'aspect_ratio' in example_dict.keys(): if not isinstance(example_dict['f'], float): raise ValueError('If aspect ratio is provided, ' 'f needs to be a single float.') aspect_ratio = example_dict['aspect_ratio'] if 'skew' in example_dict.keys(): skew = example_dict['skew'] features_dict['intrinsics'] = self._create_calibration_matrix( example_dict['f'], example_dict['optical_center'], aspect_ratio, skew ) return super(Camera, self).encode_example(features_dict) def _create_rotation_from_look_at(self, look_at, position, up): """Creates a rotation matrix according to the OpenGL gluLookAt convention. Args: look_at: A float32 3D vector of look_at direction. position: A float32 3D vector of camera position. up: A float32 3D up direction vector. Returns: A 3x3 float32 rotation matrix. (https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluLookAt.xml) """ dir_vec = look_at - position dir_vec /= np.linalg.norm(dir_vec) side_vec = np.cross(dir_vec, up) side_vec /= np.linalg.norm(side_vec) up_vec = np.cross(side_vec, dir_vec) matrix = np.array([side_vec, up_vec, -dir_vec]) return matrix def _create_calibration_matrix(self, f, optical_center, aspect_ratio=1, skew=0): """Constructs the 3x3 calibration matrix K. Args: f: Focal length of the camera. Either a single float32 value or a tuple of float32 when different focal lengths for each axis are provided (f_x, f_y). optical_center: Tuple (c_x, c_y) containing the optical center of the camera in pixel coordinates. aspect_ratio: Optional parameter, if a fixed focal length for both dimensions is used. Defaults to 1. skew: Optional parameter denoting the skew between the camera axes. Returns: float32 Tensor of shape [3,3] containing the upper triangular calibration matrix K. """ if not isinstance(optical_center, tuple): raise ValueError('Optical center of camera needs ' 'to be a tuple of (c_x, c_y).') if isinstance(f, tuple): f_x, f_y = f else: f_x = f f_y = aspect_ratio * f return np.asarray([[f_x, skew, optical_center[0]], [0, f_y, optical_center[1]], [0, 0, 1] ], dtype=np.float32) @classmethod def from_json_content(cls, value) -> 'Camera': return cls() def to_json_content(self): return {}
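A hedged sketch of how `encode_example` is meant to be fed, using the look_at form of the pose; all numeric values are illustrative. With a single focal length f and an aspect ratio a, the resulting calibration matrix has f_y = a * f, as in the class docstring:

import numpy as np
from tensorflow_graphics.datasets.features import camera_feature  # Path from the filepath above.

camera = camera_feature.Camera()
encoded = camera.encode_example({
    'pose': {
        'look_at': np.array([0., 0., 0.], dtype=np.float32),
        'position': np.array([0., 0., 5.], dtype=np.float32),
        'up': np.array([0., 1., 0.], dtype=np.float32),
    },
    'f': 100.0,                      # Single focal length in pixels.
    'optical_center': (320., 240.),
    'aspect_ratio': 1.5,             # Makes f_y = 1.5 * 100 = 150.
})
# The encoded intrinsics are [[100., 0., 320.], [0., 150., 240.], [0., 0., 1.]].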
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/rendering/camera/tests/perspective_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for perspective camera functionalities.""" import math import sys from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.rendering.camera import perspective from tensorflow_graphics.util import test_case class PerspectiveTest(test_case.TestCase): @parameterized.parameters( ("must have exactly 4 dimensions in axis -1", (4, 3)), ("must have exactly 4 dimensions in axis -2", (5, 4)), ("must have exactly 4 dimensions in axis -2", (None, 4)), ("must have exactly 4 dimensions in axis -1", (4, None)), ) def test_parameters_from_right_handed_shape_exception_raised( self, error_msg, *shapes): """Checks the inputs of the from_right_handed_shape function.""" self.assert_exception_is_raised(perspective.parameters_from_right_handed, error_msg, shapes) @parameterized.parameters( ((4, 4),), ((None, 4, 4),), ((None, None, 4, 4),), ) def test_parameters_from_right_handed_shape_exception_not_raised( self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised( perspective.parameters_from_right_handed, shapes) def test_parameters_from_right_handed_random(self): """Tests that parameters_from_right_handed returns the expected values.""" tensor_size = np.random.randint(2, 4) tensor_shape = np.random.randint(2, 5, size=(tensor_size)).tolist() vertical_field_of_view_gt = np.random.uniform( sys.float_info.epsilon, np.pi - sys.float_info.epsilon, tensor_shape + [1]) aspect_ratio_gt = np.random.uniform(0.1, 10.0, tensor_shape + [1]) near_gt = np.random.uniform(0.1, 100.0, tensor_shape + [1]) far_gt = near_gt + np.random.uniform(0.1, 100.0, tensor_shape + [1]) projection_matrix = perspective.right_handed(vertical_field_of_view_gt, aspect_ratio_gt, near_gt, far_gt) vertical_field_of_view_pred, aspect_ratio_pred, near_pred, far_pred = perspective.parameters_from_right_handed( projection_matrix) with self.subTest(name="vertical_field_of_view"): self.assertAllClose(vertical_field_of_view_gt, vertical_field_of_view_pred) with self.subTest(name="aspect_ratio"): self.assertAllClose(aspect_ratio_gt, aspect_ratio_pred) with self.subTest(name="near_plane"): self.assertAllClose(near_gt, near_pred) with self.subTest(name="far_plane"): self.assertAllClose(far_gt, far_pred) def test_parameters_from_right_handed_jacobian_random(self): """Tests the Jacobian of parameters_from_right_handed.""" tensor_size = np.random.randint(2, 4) tensor_shape = np.random.randint(2, 5, size=(tensor_size)).tolist() vertical_field_of_view = np.random.uniform(sys.float_info.epsilon, np.pi - sys.float_info.epsilon, tensor_shape + [1]) aspect_ratio = np.random.uniform(0.1, 10.0, tensor_shape + [1]) near = np.random.uniform(0.1, 100.0, tensor_shape + [1]) far = near + np.random.uniform(0.1, 100.0, tensor_shape + [1]) projection_matrix = perspective.right_handed(vertical_field_of_view, aspect_ratio, near, far) with self.subTest(name="vertical_field_of_view"): 
self.assert_jacobian_is_finite_fn( lambda x: perspective.parameters_from_right_handed(x)[0], [projection_matrix]) with self.subTest(name="aspect_ratio"): self.assert_jacobian_is_finite_fn( lambda x: perspective.parameters_from_right_handed(x)[1], [projection_matrix]) with self.subTest(name="near_plane"): self.assert_jacobian_is_finite_fn( lambda x: perspective.parameters_from_right_handed(x)[2], [projection_matrix]) with self.subTest(name="far_plane"): self.assert_jacobian_is_finite_fn( lambda x: perspective.parameters_from_right_handed(x)[3], [projection_matrix]) def test_perspective_right_handed_preset(self): """Tests that perspective_right_handed generates expected results.""" vertical_field_of_view = ((60.0 * math.pi / 180.0,), (50.0 * math.pi / 180.0,)) aspect_ratio = ((1.5,), (1.1,)) near = ((1.0,), (1.2,)) far = ((10.0,), (5.0,)) pred = perspective.right_handed(vertical_field_of_view, aspect_ratio, near, far) gt = (((1.15470052, 0.0, 0.0, 0.0), (0.0, 1.73205066, 0.0, 0.0), (0.0, 0.0, -1.22222221, -2.22222233), (0.0, 0.0, -1.0, 0.0)), ((1.9495517, 0.0, 0.0, 0.0), (0.0, 2.14450693, 0.0, 0.0), (0.0, 0.0, -1.63157892, -3.15789485), (0.0, 0.0, -1.0, 0.0))) self.assertAllClose(pred, gt) @parameterized.parameters( ((1,), (1,), (1,), (1,)), ((None, 1), (None, 1), (None, 1), (None, 1)), ((None, 3, 1), (None, 3, 1), (None, 3, 1), (None, 3, 1)), ) def test_perspective_right_handed_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.right_handed, shapes) @parameterized.parameters( ("Not all batch dimensions are identical", (1,), (3, 1), (3, 1), (3, 1)), ("Not all batch dimensions are identical", (3, 1), (None, 3, 1), (3, 1), (3, 1)), ) def test_perspective_right_handed_shape_exception_raised( self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(perspective.right_handed, error_msg, shapes) @parameterized.parameters( ((1.0,), (1.0,), np.random.uniform(-1.0, 0.0, size=(1,)).astype(np.float32), (1.0,)), ((1.0,), (1.0,), (0.0,), (1.0,)), ((1.0,), np.random.uniform(-1.0, 0.0, size=(1,)).astype(np.float32), (0.1,), (1.0,)), ((1.0,), (0.0,), (0.1,), (1.0,)), ((1.0,), (1.0,), np.random.uniform(1.0, 2.0, size=(1,)).astype(np.float32), np.random.uniform(0.1, 0.5, size=(1,)).astype(np.float32)), ((1.0,), (1.0,), (0.1,), (0.1,)), (np.random.uniform(-math.pi, 0.0, size=(1,)).astype(np.float32), (1.0,), (0.1,), (1.0,)), (np.random.uniform(math.pi, 2.0 * math.pi, size=(1,)).astype(np.float32), (1.0,), (0.1,), (1.0,)), ((0.0,), (1.0,), (0.1,), (1.0,)), ((math.pi,), (1.0,), (0.1,), (1.0,)), ) def test_perspective_right_handed_valid_range_exception_raised( self, vertical_field_of_view, aspect_ratio, near, far): """Tests that an exception is raised with out of bounds values.""" with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( perspective.right_handed(vertical_field_of_view, aspect_ratio, near, far)) def test_perspective_right_handed_cross_jacobian_preset(self): """Tests the Jacobian of perspective_right_handed.""" vertical_field_of_view_init = np.array((1.0,)) aspect_ratio_init = np.array((1.0,)) near_init = np.array((1.0,)) far_init = np.array((10.0,)) self.assert_jacobian_is_correct_fn( perspective.right_handed, [vertical_field_of_view_init, aspect_ratio_init, near_init, far_init]) def test_perspective_right_handed_cross_jacobian_random(self): """Tests the Jacobian of perspective_right_handed.""" tensor_size = np.random.randint(1, 3) 
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() eps = np.finfo(np.float64).eps vertical_field_of_view_init = np.random.uniform( eps, math.pi - eps, size=tensor_shape + [1]) aspect_ratio_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1]) near_init = np.random.uniform(eps, 10.0, size=tensor_shape + [1]) far_init = np.random.uniform(10 + eps, 100.0, size=tensor_shape + [1]) self.assert_jacobian_is_correct_fn( perspective.right_handed, [vertical_field_of_view_init, aspect_ratio_init, near_init, far_init]) @parameterized.parameters( ((3, 3),), ((3, 3, 3),), ((None, 3, 3),), ) def test_intrinsics_from_matrix_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.intrinsics_from_matrix, shapes) @parameterized.parameters( ("must have a rank greater than 1", (3,)), ("must have exactly 3 dimensions in axis -2", (None, 3)), ("must have exactly 3 dimensions in axis -1", (3, None)), ) def test_intrinsics_from_matrix_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(perspective.intrinsics_from_matrix, error_msg, shapes) @parameterized.parameters( ((((0., 0., 0.), (0., 0., 0.), (0., 0., 1.)),), ((0., 0.), (0., 0.), (0.0,))), ((((1., 0., 3.), (0., 2., 4.), (0., 0., 1.)),), ((1., 2.), (3., 4.), (0.0,))), ) def test_intrinsics_from_matrix_preset(self, test_inputs, test_outputs): """Tests that intrinsics_from_matrix gives the correct result.""" self.assert_output_is_correct(perspective.intrinsics_from_matrix, test_inputs, test_outputs) def test_intrinsics_from_matrix_to_intrinsics_random(self): """Tests that converting intrinsics to a matrix and back is consistent.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) random_skew_coeff = np.random.normal(size=tensor_shape + [1]) matrix = perspective.matrix_from_intrinsics(random_focal, random_principal_point, random_skew_coeff) focal, principal_point, skew_coeff = perspective.intrinsics_from_matrix( matrix) random_skew_coeff = np.reshape(random_skew_coeff, (1, 1)) self.assertAllClose(random_focal, focal, rtol=1e-3) self.assertAllClose(random_principal_point, principal_point, rtol=1e-3) self.assertAllClose(random_skew_coeff, skew_coeff, rtol=1e-3) @parameterized.parameters( ((2,), (2,), (1,)), ((2, 2), (2, 2), (2, 1)), ((None, 2), (None, 2), (None, 1)), ) def test_matrix_from_intrinsics_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.matrix_from_intrinsics, shapes) @parameterized.parameters( ((2,), (2,)), ((2, 2), (2, 2)), ((None, 2), (None, 2)), ) def test_matrix_from_intrinsics_exception_not_raised_when_skew_not_passed( self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.matrix_from_intrinsics, shapes) @parameterized.parameters( ("must have exactly 2 dimensions in axis -1", (None,), (2,)), ("must have exactly 2 dimensions in axis -1", (2,), (None,)), ("Not all batch dimensions are identical.", (3, 2), (2, 2)), ) def test_matrix_from_intrinsics_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(perspective.matrix_from_intrinsics, error_msg, shapes) 
@parameterized.parameters( (((0.0, 0.0), (0.0, 0.0), (0.0,)), (((0.0, 0.0, 0.0), (0.0, 0.0, 0.0), (0.0, 0.0, 1.0)),)), (((1.0, 2.0), (3.0, 4.0), (0.0,)), (((1.0, 0.0, 3.0), (0.0, 2.0, 4.0), (0.0, 0.0, 1.0)),))) def test_matrix_from_intrinsics_preset(self, test_inputs, test_outputs): """Tests that matrix_from_intrinsics gives the correct result.""" self.assert_output_is_correct(perspective.matrix_from_intrinsics, test_inputs, test_outputs) def test_matrix_from_intrinsics_to_matrix_random(self): """Tests that converting a matrix to intrinsics and back is consistent.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) fx = random_focal[..., 0] fy = random_focal[..., 1] cx = random_principal_point[..., 0] cy = random_principal_point[..., 1] zero = np.zeros_like(fx) one = np.ones_like(fx) random_matrix = np.stack((fx, zero, cx, zero, fy, cy, zero, zero, one), axis=-1).reshape(tensor_shape + [3, 3]) focal, principal_point, skew_coefficient = perspective.intrinsics_from_matrix( random_matrix) matrix = perspective.matrix_from_intrinsics(focal, principal_point, skew_coefficient) self.assertAllClose(random_matrix, matrix, rtol=1e-3) @parameterized.parameters( ((3,), (2,), (2,)), ((2, 3), (2, 2), (2, 2)), ((2, 3), (2,), (2,)), ((None, 3), (None, 2), (None, 2)), ) def test_project_exception_not_exception_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.project, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (None,), (2,), (2,)), ("must have exactly 2 dimensions in axis -1", (3,), (None,), (2,)), ("must have exactly 2 dimensions in axis -1", (3,), (2,), (None,)), ("Not all batch dimensions are broadcast-compatible.", (3, 3), (2, 2), (2, 2)), ) def test_project_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(perspective.project, error_msg, shape) @parameterized.parameters( (((0., 0., 1.), (1., 1.), (0., 0.)), ((0., 0.),)), (((4., 2., 1.), (1., 1.), (-4., -2.)), ((0., 0.),)), (((4., 2., 10.), (1., 1.), (-.4, -.2)), ((0., 0.),)), (((4., 2., 10.), (2., 1.), (-.8, -.2)), ((0., 0.),)), (((4., 2., 10.), (2., 1.), (-.8, 0.)), ((0., .2),)), ) def test_project_preset(self, test_inputs, test_outputs): """Tests that the project function gives the correct result.""" self.assert_output_is_correct(perspective.project, test_inputs, test_outputs) def test_project_unproject_random(self): """Tests that projecting and unprojecting gives an identity mapping.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() random_point_3d = np.random.normal(size=tensor_shape + [3]) random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) random_depth = np.expand_dims(random_point_3d[..., 2], axis=-1) point_2d = perspective.project(random_point_3d, random_focal, random_principal_point) point_3d = perspective.unproject(point_2d, random_depth, random_focal, random_principal_point) self.assertAllClose(random_point_3d, point_3d, rtol=1e-3) def test_project_ray_random(self): """Tests that the ray is pointing toward the correct location.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_3d = np.random.normal(size=tensor_shape + [3]) random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) random_depth = np.expand_dims(random_point_3d[..., 2], axis=-1) point_2d = perspective.project(random_point_3d, random_focal, random_principal_point) ray_3d = perspective.ray(point_2d, random_focal, random_principal_point) ray_3d = random_depth * ray_3d self.assertAllClose(random_point_3d, ray_3d, rtol=1e-3) @parameterized.parameters( ((2,), (2,), (2,)), ((2, 2), (2, 2), (2, 2)), ((3, 2), (1, 2), (2,)), ((None, 2), (None, 2), (None, 2)), ) def test_ray_exception_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.ray, shapes) @parameterized.parameters( ("must have exactly 2 dimensions in axis -1", (None,), (2,), (2,)), ("must have exactly 2 dimensions in axis -1", (2,), (None,), (2,)), ("must have exactly 2 dimensions in axis -1", (2,), (2,), (None,)), ("Not all batch dimensions are broadcast-compatible.", (3, 2), (1, 2), (2, 2)), ) def test_ray_exception_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(perspective.ray, error_msg, shapes) @parameterized.parameters( (((0., 0.), (1., 1.), (0., 0.)), ((0., 0., 1.),)), (((0., 0.), (1., 1.), (-1., -2.)), ((1., 2., 1.),)), (((0., 0.), (10., 1.), (-1., -2.)), ((.1, 2., 1.),)), (((-2., -4.), (10., 1.), (-3., -6.)), ((.1, 2., 1.),)), ) def test_ray_preset(self, test_inputs, test_outputs): """Tests that the ray function gives the correct result.""" self.assert_output_is_correct(perspective.ray, test_inputs, test_outputs) def test_ray_project_random(self): """Tests that the end point of the ray projects at the correct location.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() random_point_2d = np.random.normal(size=tensor_shape + [2]) random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) ray_3d = perspective.ray(random_point_2d, random_focal, random_principal_point) point_2d = perspective.project(ray_3d, random_focal, random_principal_point) self.assertAllClose(random_point_2d, point_2d, rtol=1e-3) @parameterized.parameters( ((2,), (1,), (2,), (2,)), ((2, 2), (2, 1), (2, 2), (2, 2)), ((None, 2), (None, 1), (None, 2), (None, 2)), ) def test_unproject_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.unproject, shapes) @parameterized.parameters( ("must have exactly 2 dimensions in axis -1", (None,), (1,), (2,), (2,)), ("must have exactly 1 dimensions in axis -1", (2,), (None,), (2,), (2,)), ("must have exactly 2 dimensions in axis -1", (2,), (1,), (None,), (2,)), ("must have exactly 2 dimensions in axis -1", (2,), (1,), (2,), (None,)), ("Not all batch dimensions are identical.", (1, 2), (2, 1), (2, 2), (2, 2)), ) def test_unproject_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(perspective.unproject, error_msg, shapes) @parameterized.parameters( (((0., 0.), (1.,), (1., 1.), (0., 0.)), ((0., 0., 1.),)), (((0., 0.), (1.,), (1., 1.), (-4., -2.)), ((4., 2., 1.),)), (((0., 0.), (10.,), (1., 1.), (-.4, -.2)), ((4., 2., 10.),)), (((0., 0.), (10.,), (2., 1.), (-.8, -.2)), ((4., 2., 10.),)), (((0., .2), (10.,),
(2., 1.), (-.8, 0.)), ((4., 2., 10.),)), ) def test_unproject_preset(self, test_inputs, test_outputs): """Tests that the unproject function gives the correct result.""" self.assert_output_is_correct(perspective.unproject, test_inputs, test_outputs) def test_unproject_project_random(self): """Tests that unprojecting and projecting gives an identity mapping.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() random_point_2d = np.random.normal(size=tensor_shape + [2]) random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) random_depth = np.random.normal(size=tensor_shape + [1]) point_3d = perspective.unproject(random_point_2d, random_depth, random_focal, random_principal_point) point_2d = perspective.project(point_3d, random_focal, random_principal_point) self.assertAllClose(random_point_2d, point_2d, rtol=1e-3) def test_unproject_ray_random(self): """Tests that the ray is pointing toward the correct location.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() random_point_2d = np.random.normal(size=tensor_shape + [2]) random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) random_depth = np.random.normal(size=tensor_shape + [1]) point_3d = perspective.unproject(random_point_2d, random_depth, random_focal, random_principal_point) ray_3d = perspective.ray(random_point_2d, random_focal, random_principal_point) ray_3d = random_depth * ray_3d self.assertAllClose(point_3d, ray_3d, rtol=1e-3) if __name__ == "__main__": test_case.main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for perspective camera functionalities.""" import math import sys from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.rendering.camera import perspective from tensorflow_graphics.util import test_case class PerspectiveTest(test_case.TestCase): @parameterized.parameters( ("must have exactly 4 dimensions in axis -1", (4, 3)), ("must have exactly 4 dimensions in axis -2", (5, 4)), ("must have exactly 4 dimensions in axis -2", (None, 4)), ("must have exactly 4 dimensions in axis -1", (4, None)), ) def test_parameters_from_right_handed_shape_exception_raised( self, error_msg, *shapes): """Checks the inputs of the from_right_handed_shape function.""" self.assert_exception_is_raised(perspective.parameters_from_right_handed, error_msg, shapes) @parameterized.parameters( ((4, 4),), ((None, 4, 4),), ((None, None, 4, 4),), ) def test_parameters_from_right_handed_shape_exception_not_raised( self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised( perspective.parameters_from_right_handed, shapes) def test_parameters_from_right_handed_random(self): """Tests that parameters_from_right_handed returns the expected values.""" tensor_size = np.random.randint(2, 4) tensor_shape = np.random.randint(2, 5, size=(tensor_size)).tolist() vertical_field_of_view_gt = np.random.uniform( sys.float_info.epsilon, np.pi - sys.float_info.epsilon, tensor_shape + [1]) aspect_ratio_gt = np.random.uniform(0.1, 10.0, tensor_shape + [1]) near_gt = np.random.uniform(0.1, 100.0, tensor_shape + [1]) far_gt = near_gt + np.random.uniform(0.1, 100.0, tensor_shape + [1]) projection_matrix = perspective.right_handed(vertical_field_of_view_gt, aspect_ratio_gt, near_gt, far_gt) vertical_field_of_view_pred, aspect_ratio_pred, near_pred, far_pred = perspective.parameters_from_right_handed( projection_matrix) with self.subTest(name="vertical_field_of_view"): self.assertAllClose(vertical_field_of_view_gt, vertical_field_of_view_pred) with self.subTest(name="aspect_ratio"): self.assertAllClose(aspect_ratio_gt, aspect_ratio_pred) with self.subTest(name="near_plane"): self.assertAllClose(near_gt, near_pred) with self.subTest(name="far_plane"): self.assertAllClose(far_gt, far_pred) def test_parameters_from_right_handed_jacobian_random(self): """Tests the Jacobian of parameters_from_right_handed.""" tensor_size = np.random.randint(2, 4) tensor_shape = np.random.randint(2, 5, size=(tensor_size)).tolist() vertical_field_of_view = np.random.uniform(sys.float_info.epsilon, np.pi - sys.float_info.epsilon, tensor_shape + [1]) aspect_ratio = np.random.uniform(0.1, 10.0, tensor_shape + [1]) near = np.random.uniform(0.1, 100.0, tensor_shape + [1]) far = near + np.random.uniform(0.1, 100.0, tensor_shape + [1]) projection_matrix = perspective.right_handed(vertical_field_of_view, aspect_ratio, near, far) with self.subTest(name="vertical_field_of_view"): 
self.assert_jacobian_is_finite_fn( lambda x: perspective.parameters_from_right_handed(x)[0], [projection_matrix]) with self.subTest(name="aspect_ratio"): self.assert_jacobian_is_finite_fn( lambda x: perspective.parameters_from_right_handed(x)[1], [projection_matrix]) with self.subTest(name="near_plane"): self.assert_jacobian_is_finite_fn( lambda x: perspective.parameters_from_right_handed(x)[2], [projection_matrix]) with self.subTest(name="far_plane"): self.assert_jacobian_is_finite_fn( lambda x: perspective.parameters_from_right_handed(x)[3], [projection_matrix]) def test_perspective_right_handed_preset(self): """Tests that perspective_right_handed generates expected results.""" vertical_field_of_view = ((60.0 * math.pi / 180.0,), (50.0 * math.pi / 180.0,)) aspect_ratio = ((1.5,), (1.1,)) near = ((1.0,), (1.2,)) far = ((10.0,), (5.0,)) pred = perspective.right_handed(vertical_field_of_view, aspect_ratio, near, far) gt = (((1.15470052, 0.0, 0.0, 0.0), (0.0, 1.73205066, 0.0, 0.0), (0.0, 0.0, -1.22222221, -2.22222233), (0.0, 0.0, -1.0, 0.0)), ((1.9495517, 0.0, 0.0, 0.0), (0.0, 2.14450693, 0.0, 0.0), (0.0, 0.0, -1.63157892, -3.15789485), (0.0, 0.0, -1.0, 0.0))) self.assertAllClose(pred, gt) @parameterized.parameters( ((1,), (1,), (1,), (1,)), ((None, 1), (None, 1), (None, 1), (None, 1)), ((None, 3, 1), (None, 3, 1), (None, 3, 1), (None, 3, 1)), ) def test_perspective_right_handed_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.right_handed, shapes) @parameterized.parameters( ("Not all batch dimensions are identical", (1,), (3, 1), (3, 1), (3, 1)), ("Not all batch dimensions are identical", (3, 1), (None, 3, 1), (3, 1), (3, 1)), ) def test_perspective_right_handed_shape_exception_raised( self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(perspective.right_handed, error_msg, shapes) @parameterized.parameters( ((1.0,), (1.0,), np.random.uniform(-1.0, 0.0, size=(1,)).astype(np.float32), (1.0,)), ((1.0,), (1.0,), (0.0,), (1.0,)), ((1.0,), np.random.uniform(-1.0, 0.0, size=(1,)).astype(np.float32), (0.1,), (1.0,)), ((1.0,), (0.0,), (0.1,), (1.0,)), ((1.0,), (1.0,), np.random.uniform(1.0, 2.0, size=(1,)).astype(np.float32), np.random.uniform(0.1, 0.5, size=(1,)).astype(np.float32)), ((1.0,), (1.0,), (0.1,), (0.1,)), (np.random.uniform(-math.pi, 0.0, size=(1,)).astype(np.float32), (1.0,), (0.1,), (1.0,)), (np.random.uniform(math.pi, 2.0 * math.pi, size=(1,)).astype(np.float32), (1.0,), (0.1,), (1.0,)), ((0.0,), (1.0,), (0.1,), (1.0,)), ((math.pi,), (1.0,), (0.1,), (1.0,)), ) def test_perspective_right_handed_valid_range_exception_raised( self, vertical_field_of_view, aspect_ratio, near, far): """Tests that an exception is raised with out of bounds values.""" with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( perspective.right_handed(vertical_field_of_view, aspect_ratio, near, far)) def test_perspective_right_handed_cross_jacobian_preset(self): """Tests the Jacobian of perspective_right_handed.""" vertical_field_of_view_init = np.array((1.0,)) aspect_ratio_init = np.array((1.0,)) near_init = np.array((1.0,)) far_init = np.array((10.0,)) self.assert_jacobian_is_correct_fn( perspective.right_handed, [vertical_field_of_view_init, aspect_ratio_init, near_init, far_init]) def test_perspective_right_handed_cross_jacobian_random(self): """Tests the Jacobian of perspective_right_handed.""" tensor_size = np.random.randint(1, 3) 
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() eps = np.finfo(np.float64).eps vertical_field_of_view_init = np.random.uniform( eps, math.pi - eps, size=tensor_shape + [1]) aspect_ratio_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1]) near_init = np.random.uniform(eps, 10.0, size=tensor_shape + [1]) far_init = np.random.uniform(10 + eps, 100.0, size=tensor_shape + [1]) self.assert_jacobian_is_correct_fn( perspective.right_handed, [vertical_field_of_view_init, aspect_ratio_init, near_init, far_init]) @parameterized.parameters( ((3, 3),), ((3, 3, 3),), ((None, 3, 3),), ) def test_intrinsics_from_matrix_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.intrinsics_from_matrix, shapes) @parameterized.parameters( ("must have a rank greater than 1", (3,)), ("must have exactly 3 dimensions in axis -2", (None, 3)), ("must have exactly 3 dimensions in axis -1", (3, None)), ) def test_intrinsics_from_matrix_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(perspective.intrinsics_from_matrix, error_msg, shapes) @parameterized.parameters( ((((0., 0., 0.), (0., 0., 0.), (0., 0., 1.)),), ((0., 0.), (0., 0.), (0.0,))), ((((1., 0., 3.), (0., 2., 4.), (0., 0., 1.)),), ((1., 2.), (3., 4.), (0.0,))), ) def test_intrinsics_from_matrix_preset(self, test_inputs, test_outputs): """Tests that intrinsics_from_matrix gives the correct result.""" self.assert_output_is_correct(perspective.intrinsics_from_matrix, test_inputs, test_outputs) def test_intrinsics_from_matrix_to_intrinsics_random(self): """Tests that converting intrinsics to a matrix and back is consistent.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) random_skew_coeff = np.random.normal(size=tensor_shape + [1]) matrix = perspective.matrix_from_intrinsics(random_focal, random_principal_point, random_skew_coeff) focal, principal_point, skew_coeff = perspective.intrinsics_from_matrix( matrix) random_skew_coeff = np.reshape(random_skew_coeff, (1, 1)) self.assertAllClose(random_focal, focal, rtol=1e-3) self.assertAllClose(random_principal_point, principal_point, rtol=1e-3) self.assertAllClose(random_skew_coeff, skew_coeff, rtol=1e-3) @parameterized.parameters( ((2,), (2,), (1,)), ((2, 2), (2, 2), (2, 1)), ((None, 2), (None, 2), (None, 1)), ) def test_matrix_from_intrinsics_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.matrix_from_intrinsics, shapes) @parameterized.parameters( ((2,), (2,)), ((2, 2), (2, 2)), ((None, 2), (None, 2)), ) def test_matrix_from_intrinsics_exception_not_raised_when_skew_not_passed( self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.matrix_from_intrinsics, shapes) @parameterized.parameters( ("must have exactly 2 dimensions in axis -1", (None,), (2,)), ("must have exactly 2 dimensions in axis -1", (2,), (None,)), ("Not all batch dimensions are identical.", (3, 2), (2, 2)), ) def test_matrix_from_intrinsics_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(perspective.matrix_from_intrinsics, error_msg, shapes) 
@parameterized.parameters( (((0.0, 0.0), (0.0, 0.0), (0.0,)), (((0.0, 0.0, 0.0), (0.0, 0.0, 0.0), (0.0, 0.0, 1.0)),)), (((1.0, 2.0), (3.0, 4.0), (0.0,)), (((1.0, 0.0, 3.0), (0.0, 2.0, 4.0), (0.0, 0.0, 1.0)),))) def test_matrix_from_intrinsics_preset(self, test_inputs, test_outputs): """Tests that matrix_from_intrinsics gives the correct result.""" self.assert_output_is_correct(perspective.matrix_from_intrinsics, test_inputs, test_outputs) def test_matrix_from_intrinsics_to_matrix_random(self): """Tests that converting a matrix to intrinsics and back is consistent.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) fx = random_focal[..., 0] fy = random_focal[..., 1] cx = random_principal_point[..., 0] cy = random_principal_point[..., 1] zero = np.zeros_like(fx) one = np.ones_like(fx) random_matrix = np.stack((fx, zero, cx, zero, fy, cy, zero, zero, one), axis=-1).reshape(tensor_shape + [3, 3]) focal, principal_point, skew_coefficient = perspective.intrinsics_from_matrix( random_matrix) matrix = perspective.matrix_from_intrinsics(focal, principal_point, skew_coefficient) self.assertAllClose(random_matrix, matrix, rtol=1e-3) @parameterized.parameters( ((3,), (2,), (2,)), ((2, 3), (2, 2), (2, 2)), ((2, 3), (2,), (2,)), ((None, 3), (None, 2), (None, 2)), ) def test_project_exception_not_exception_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.project, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (None,), (2,), (2,)), ("must have exactly 2 dimensions in axis -1", (3,), (None,), (2,)), ("must have exactly 2 dimensions in axis -1", (3,), (2,), (None,)), ("Not all batch dimensions are broadcast-compatible.", (3, 3), (2, 2), (2, 2)), ) def test_project_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(perspective.project, error_msg, shape) @parameterized.parameters( (((0., 0., 1.), (1., 1.), (0., 0.)), ((0., 0.),)), (((4., 2., 1.), (1., 1.), (-4., -2.)), ((0., 0.),)), (((4., 2., 10.), (1., 1.), (-.4, -.2)), ((0., 0.),)), (((4., 2., 10.), (2., 1.), (-.8, -.2)), ((0., 0.),)), (((4., 2., 10.), (2., 1.), (-.8, 0.)), ((0., .2),)), ) def test_project_preset(self, test_inputs, test_outputs): """Tests that the project function gives the correct result.""" self.assert_output_is_correct(perspective.project, test_inputs, test_outputs) def test_project_unproject_random(self): """Tests that projecting and unprojecting gives an identity mapping.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() random_point_3d = np.random.normal(size=tensor_shape + [3]) random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) random_depth = np.expand_dims(random_point_3d[..., 2], axis=-1) point_2d = perspective.project(random_point_3d, random_focal, random_principal_point) point_3d = perspective.unproject(point_2d, random_depth, random_focal, random_principal_point) self.assertAllClose(random_point_3d, point_3d, rtol=1e-3) def test_project_ray_random(self): """Tests that the ray is pointing toward the correct location.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_3d = np.random.normal(size=tensor_shape + [3]) random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) random_depth = np.expand_dims(random_point_3d[..., 2], axis=-1) point_2d = perspective.project(random_point_3d, random_focal, random_principal_point) ray_3d = perspective.ray(point_2d, random_focal, random_principal_point) ray_3d = random_depth * ray_3d self.assertAllClose(random_point_3d, ray_3d, rtol=1e-3) @parameterized.parameters( ((2,), (2,), (2,)), ((2, 2), (2, 2), (2, 2)), ((3, 2), (1, 2), (2,)), ((None, 2), (None, 2), (None, 2)), ) def test_ray_exception_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.ray, shapes) @parameterized.parameters( ("must have exactly 2 dimensions in axis -1", (None,), (2,), (2,)), ("must have exactly 2 dimensions in axis -1", (2,), (None,), (2,)), ("must have exactly 2 dimensions in axis -1", (2,), (2,), (None,)), ("Not all batch dimensions are broadcast-compatible.", (3, 2), (1, 2), (2, 2)), ) def test_ray_exception_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(perspective.ray, error_msg, shapes) @parameterized.parameters( (((0., 0.), (1., 1.), (0., 0.)), ((0., 0., 1.),)), (((0., 0.), (1., 1.), (-1., -2.)), ((1., 2., 1.),)), (((0., 0.), (10., 1.), (-1., -2.)), ((.1, 2., 1.),)), (((-2., -4.), (10., 1.), (-3., -6.)), ((.1, 2., 1.),)), ) def test_ray_preset(self, test_inputs, test_outputs): """Tests that the ray function gives the correct result.""" self.assert_output_is_correct(perspective.ray, test_inputs, test_outputs) def test_ray_project_random(self): """Tests that the end point of the ray projects to the correct location.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() random_point_2d = np.random.normal(size=tensor_shape + [2]) random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) ray_3d = perspective.ray(random_point_2d, random_focal, random_principal_point) point_2d = perspective.project(ray_3d, random_focal, random_principal_point) self.assertAllClose(random_point_2d, point_2d, rtol=1e-3) @parameterized.parameters( ((2,), (1,), (2,), (2,)), ((2, 2), (2, 1), (2, 2), (2, 2)), ((None, 2), (None, 1), (None, 2), (None, 2)), ) def test_unproject_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(perspective.unproject, shapes) @parameterized.parameters( ("must have exactly 2 dimensions in axis -1", (None,), (1,), (2,), (2,)), ("must have exactly 1 dimensions in axis -1", (2,), (None,), (2,), (2,)), ("must have exactly 2 dimensions in axis -1", (2,), (1,), (None,), (2,)), ("must have exactly 2 dimensions in axis -1", (2,), (1,), (2,), (None,)), ("Not all batch dimensions are identical.", (1, 2), (2, 1), (2, 2), (2, 2)), ) def test_unproject_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(perspective.unproject, error_msg, shapes) @parameterized.parameters( (((0., 0.), (1.,), (1., 1.), (0., 0.)), ((0., 0., 1.),)), (((0., 0.), (1.,), (1., 1.), (-4., -2.)), ((4., 2., 1.),)), (((0., 0.), (10.,), (1., 1.), (-.4, -.2)), ((4., 2., 10.),)), (((0., 0.), (10.,), (2., 1.), (-.8, -.2)), ((4., 2., 10.),)), (((0., .2), (10.,),
(2., 1.), (-.8, 0.)), ((4., 2., 10.),)), ) def test_unproject_preset(self, test_inputs, test_outputs): """Tests that the unproject function gives the correct result.""" self.assert_output_is_correct(perspective.unproject, test_inputs, test_outputs) def test_unproject_project_random(self): """Tests that unprojecting and projecting gives an identity mapping.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() random_point_2d = np.random.normal(size=tensor_shape + [2]) random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) random_depth = np.random.normal(size=tensor_shape + [1]) point_3d = perspective.unproject(random_point_2d, random_depth, random_focal, random_principal_point) point_2d = perspective.project(point_3d, random_focal, random_principal_point) self.assertAllClose(random_point_2d, point_2d, rtol=1e-3) def test_unproject_ray_random(self): """Tests that the ray is pointing toward the correct location.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() random_point_2d = np.random.normal(size=tensor_shape + [2]) random_focal = np.random.normal(size=tensor_shape + [2]) random_principal_point = np.random.normal(size=tensor_shape + [2]) random_depth = np.random.normal(size=tensor_shape + [1]) point_3d = perspective.unproject(random_point_2d, random_depth, random_focal, random_principal_point) ray_3d = perspective.ray(random_point_2d, random_focal, random_principal_point) ray_3d = random_depth * ray_3d self.assertAllClose(point_3d, ray_3d, rtol=1e-3) if __name__ == "__main__": test_case.main()
-1
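The perspective tests above all revolve around one round trip: project a 3D point through a pinhole camera onto the image plane, then recover it either by unprojecting with the stored depth or by scaling the unit-depth ray through the pixel. A minimal sketch of that round trip, using only the project, unproject and ray functions exercised by these tests and one of the preset value sets they already contain (assumes TF2 eager execution):

import numpy as np
import tensorflow as tf
from tensorflow_graphics.rendering.camera import perspective

# A 3D point in camera space plus pinhole intrinsics, taken from the presets above.
point_3d = tf.constant([4.0, 2.0, 10.0])
focal = tf.constant([2.0, 1.0])
principal_point = tf.constant([-0.8, 0.0])

# Projection computes f * (x, y) / z + c, discarding depth.
point_2d = perspective.project(point_3d, focal, principal_point)  # -> (0.0, 0.2)

# Unprojection needs that depth back to invert the mapping.
depth = point_3d[2:3]
recovered = perspective.unproject(point_2d, depth, focal, principal_point)
np.testing.assert_allclose(recovered.numpy(), point_3d.numpy(), rtol=1e-6)

# Equivalently, scale the ray through the pixel (which has z = 1) by the depth.
ray_3d = perspective.ray(point_2d, focal, principal_point)
np.testing.assert_allclose((depth * ray_3d).numpy(), point_3d.numpy(), rtol=1e-6)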
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/geometry/transformation/tests/test_data.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module with test data for transformation tests.""" import numpy as np ANGLE_0 = np.array((0.,)) ANGLE_45 = np.array((np.pi / 4.,)) ANGLE_90 = np.array((np.pi / 2.,)) ANGLE_180 = np.array((np.pi,)) AXIS_2D_0 = np.array((0., 0.)) AXIS_2D_X = np.array((1., 0.)) AXIS_2D_Y = np.array((0., 1.)) def _rotation_2d_x(angle): """Creates a 2d rotation matrix. Args: angle: The angle. Returns: The 2d rotation matrix. """ angle = angle.item() return np.array(((np.cos(angle), -np.sin(angle)), (np.sin(angle), np.cos(angle)))) # pyformat: disable MAT_2D_ID = np.eye(2) MAT_2D_45 = _rotation_2d_x(ANGLE_45) MAT_2D_90 = _rotation_2d_x(ANGLE_90) MAT_2D_180 = _rotation_2d_x(ANGLE_180) AXIS_3D_0 = np.array((0., 0., 0.)) AXIS_3D_X = np.array((1., 0., 0.)) AXIS_3D_Y = np.array((0., 1., 0.)) AXIS_3D_Z = np.array((0., 0., 1.)) def _axis_angle_to_quaternion(axis, angle): """Converts an axis-angle representation to a quaternion. Args: axis: The axis of rotation. angle: The angle. Returns: The quaternion. """ quat = np.zeros(4) quat[0:3] = axis * np.sin(0.5 * angle) quat[3] = np.cos(0.5 * angle) return quat QUAT_ID = _axis_angle_to_quaternion(AXIS_3D_0, ANGLE_0) QUAT_X_45 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_45) QUAT_X_90 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_90) QUAT_X_180 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_180) QUAT_Y_45 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_45) QUAT_Y_90 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_90) QUAT_Y_180 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_180) QUAT_Z_45 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_45) QUAT_Z_90 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_90) QUAT_Z_180 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_180) def _rotation_3d_x(angle): """Creates a 3d rotation matrix around the x axis. Args: angle: The angle. Returns: The 3d rotation matrix. """ angle = angle.item() return np.array(((1., 0., 0.), (0., np.cos(angle), -np.sin(angle)), (0., np.sin(angle), np.cos(angle)))) # pyformat: disable def _rotation_3d_y(angle): """Creates a 3d rotation matrix around the y axis. Args: angle: The angle. Returns: The 3d rotation matrix. """ angle = angle.item() return np.array(((np.cos(angle), 0., np.sin(angle)), (0., 1., 0.), (-np.sin(angle), 0., np.cos(angle)))) # pyformat: disable def _rotation_3d_z(angle): """Creates a 3d rotation matrix around the z axis. Args: angle: The angle. Returns: The 3d rotation matrix. """ angle = angle.item() return np.array(((np.cos(angle), -np.sin(angle), 0.), (np.sin(angle), np.cos(angle), 0.), (0., 0., 1.))) # pyformat: disable MAT_3D_ID = np.eye(3) MAT_3D_X_45 = _rotation_3d_x(ANGLE_45) MAT_3D_X_90 = _rotation_3d_x(ANGLE_90) MAT_3D_X_180 = _rotation_3d_x(ANGLE_180) MAT_3D_Y_45 = _rotation_3d_y(ANGLE_45) MAT_3D_Y_90 = _rotation_3d_y(ANGLE_90) MAT_3D_Y_180 = _rotation_3d_y(ANGLE_180) MAT_3D_Z_45 = _rotation_3d_z(ANGLE_45) MAT_3D_Z_90 = _rotation_3d_z(ANGLE_90) MAT_3D_Z_180 = _rotation_3d_z(ANGLE_180)
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module with test data for transformation tests.""" import numpy as np ANGLE_0 = np.array((0.,)) ANGLE_45 = np.array((np.pi / 4.,)) ANGLE_90 = np.array((np.pi / 2.,)) ANGLE_180 = np.array((np.pi,)) AXIS_2D_0 = np.array((0., 0.)) AXIS_2D_X = np.array((1., 0.)) AXIS_2D_Y = np.array((0., 1.)) def _rotation_2d_x(angle): """Creates a 2d rotation matrix. Args: angle: The angle. Returns: The 2d rotation matrix. """ angle = angle.item() return np.array(((np.cos(angle), -np.sin(angle)), (np.sin(angle), np.cos(angle)))) # pyformat: disable MAT_2D_ID = np.eye(2) MAT_2D_45 = _rotation_2d_x(ANGLE_45) MAT_2D_90 = _rotation_2d_x(ANGLE_90) MAT_2D_180 = _rotation_2d_x(ANGLE_180) AXIS_3D_0 = np.array((0., 0., 0.)) AXIS_3D_X = np.array((1., 0., 0.)) AXIS_3D_Y = np.array((0., 1., 0.)) AXIS_3D_Z = np.array((0., 0., 1.)) def _axis_angle_to_quaternion(axis, angle): """Converts an axis-angle representation to a quaternion. Args: axis: The axis of rotation. angle: The angle. Returns: The quaternion. """ quat = np.zeros(4) quat[0:3] = axis * np.sin(0.5 * angle) quat[3] = np.cos(0.5 * angle) return quat QUAT_ID = _axis_angle_to_quaternion(AXIS_3D_0, ANGLE_0) QUAT_X_45 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_45) QUAT_X_90 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_90) QUAT_X_180 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_180) QUAT_Y_45 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_45) QUAT_Y_90 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_90) QUAT_Y_180 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_180) QUAT_Z_45 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_45) QUAT_Z_90 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_90) QUAT_Z_180 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_180) def _rotation_3d_x(angle): """Creates a 3d rotation matrix around the x axis. Args: angle: The angle. Returns: The 3d rotation matrix. """ angle = angle.item() return np.array(((1., 0., 0.), (0., np.cos(angle), -np.sin(angle)), (0., np.sin(angle), np.cos(angle)))) # pyformat: disable def _rotation_3d_y(angle): """Creates a 3d rotation matrix around the y axis. Args: angle: The angle. Returns: The 3d rotation matrix. """ angle = angle.item() return np.array(((np.cos(angle), 0., np.sin(angle)), (0., 1., 0.), (-np.sin(angle), 0., np.cos(angle)))) # pyformat: disable def _rotation_3d_z(angle): """Creates a 3d rotation matrix around the z axis. Args: angle: The angle. Returns: The 3d rotation matrix. """ angle = angle.item() return np.array(((np.cos(angle), -np.sin(angle), 0.), (np.sin(angle), np.cos(angle), 0.), (0., 0., 1.))) # pyformat: disable MAT_3D_ID = np.eye(3) MAT_3D_X_45 = _rotation_3d_x(ANGLE_45) MAT_3D_X_90 = _rotation_3d_x(ANGLE_90) MAT_3D_X_180 = _rotation_3d_x(ANGLE_180) MAT_3D_Y_45 = _rotation_3d_y(ANGLE_45) MAT_3D_Y_90 = _rotation_3d_y(ANGLE_90) MAT_3D_Y_180 = _rotation_3d_y(ANGLE_180) MAT_3D_Z_45 = _rotation_3d_z(ANGLE_45) MAT_3D_Z_90 = _rotation_3d_z(ANGLE_90) MAT_3D_Z_180 = _rotation_3d_z(ANGLE_180)
-1
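The constants above encode each rotation twice, as a quaternion and as a matrix, so a quick consistency check makes the conventions concrete. A minimal NumPy sketch, assuming the (x, y, z, w) quaternion layout used by _axis_angle_to_quaternion above; the _quaternion_rotate helper is introduced here for illustration only:

import numpy as np

def _quaternion_rotate(quat, point):
  """Rotates a 3d point by a unit quaternion stored as (x, y, z, w)."""
  xyz, w = quat[:3], quat[3]
  # Expansion of q * p * q^-1 for unit quaternions.
  t = 2.0 * np.cross(xyz, point)
  return point + w * t + np.cross(xyz, t)

angle = np.pi / 2.0
axis = np.array((1.0, 0.0, 0.0))
quat = np.concatenate([axis * np.sin(0.5 * angle), [np.cos(0.5 * angle)]])
mat = np.array(((1.0, 0.0, 0.0),
                (0.0, np.cos(angle), -np.sin(angle)),
                (0.0, np.sin(angle), np.cos(angle))))
point = np.array((0.0, 1.0, 0.0))
# Both representations send +y to +z under a 90 degree rotation about x.
np.testing.assert_allclose(_quaternion_rotate(quat, point), mat @ point, atol=1e-12)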
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/io/tests/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/math/tests/math_helpers_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for math_helpers.""" import sys from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.math import math_helpers from tensorflow_graphics.util import asserts from tensorflow_graphics.util import test_case class MathTest(test_case.TestCase): @parameterized.parameters( (((0.0, 0.0, 0.0),), ((0.0, np.pi / 2.0, 0.0),)), (((2.0, 0.0, 0.0),), ((2.0, np.pi / 2.0, 0.0),)), (((0.0, 1.0, 0.0),), ((1.0, np.pi / 2.0, np.pi / 2.0),)), (((0.0, 0.0, 1.0),), ((1.0, 0.0, 0.0),)), (((-1.0, 0.0, 0.0),), ((1.0, np.pi / 2.0, np.pi),)), (((0.0, -1.0, 0.0),), ((1.0, np.pi / 2.0, -np.pi / 2.0),)), (((0.0, 0.0, -1.0),), ((1.0, np.pi, 0.0),)), ) def test_cartesian_to_spherical_coordinates_preset(self, test_inputs, test_outputs): """Tests that cartesian_to_spherical_coordinates behaves as expected.""" self.assert_output_is_correct( math_helpers.cartesian_to_spherical_coordinates, test_inputs, test_outputs) @parameterized.parameters( ((3,),), ((None, 3),), ) def test_cartesian_to_spherical_coordinates_not_raised(self, *shape): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised( math_helpers.cartesian_to_spherical_coordinates, shape) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (1,)),) def test_cartesian_to_spherical_coordinates_raised(self, error_msg, *shape): """Tests that the shape exception is raised.""" self.assert_exception_is_raised( math_helpers.cartesian_to_spherical_coordinates, error_msg, shape) def test_cartesian_to_spherical_coordinates_jacobian_random(self): """Test the Jacobian of the cartesian_to_spherical_coordinates function.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() point_init = np.random.uniform(-10.0, 10.0, size=tensor_shape + [3]) self.assert_jacobian_is_correct_fn( math_helpers.cartesian_to_spherical_coordinates, [point_init]) @parameterized.parameters( (((1.0, 1.0, 1.0),),), (((1.0, 0.0, 0.0),),), (((0.0, 1.0, 0.0),),), ) def test_cartesian_to_spherical_coordinates_jacobian_preset(self, cartesian): """Test the Jacobian of the cartesian_to_spherical_coordinates function.""" point_init = np.asarray(cartesian) self.assert_jacobian_is_correct_fn( math_helpers.cartesian_to_spherical_coordinates, [point_init]) @parameterized.parameters( (((1.0, 1.0, 0.0),), ((np.sqrt(2.0), np.pi / 2.0, np.pi / 4.0),)), (((1.0, 0.0, 0.0),), ((1.0, np.pi / 2.0, 0.0),)), (((0.0, 1.0, 0.0),), ((1.0, np.pi / 2.0, np.pi / 2.0),)), (((0.0, 0.0, 1.0),), ((1.0, 0.0, 0.0),)), (((0.0, 0.0, 0.0),), ((0.0, np.pi / 2.0, 0.0),)), ) def test_cartesian_to_spherical_coordinates_values_preset( self, test_inputs, test_outputs): """Tests that cartesian_to_spherical_coordinates returns the expected values.""" self.assert_output_is_correct( math_helpers.cartesian_to_spherical_coordinates, test_inputs, test_outputs) @parameterized.parameters( (((0, 1, 5, 6, 15.0),), ((1, 1, 15, 48,
2027025.0),)),) def test_double_factorial_preset(self, test_inputs, test_outputs): """Tests that double_factorial generates expected results.""" self.assert_output_is_correct(math_helpers.double_factorial, test_inputs, test_outputs) @parameterized.parameters( (((0, 1, 2, 3, 4.0),), ((1, 1, 2, 6, 24.0),)),) def test_factorial_preset(self, test_inputs, test_outputs): """Tests that factorial generates expected results.""" self.assert_output_is_correct(math_helpers.factorial, test_inputs, test_outputs) @parameterized.parameters( (((2.0, np.pi / 2.0, 0.0),), ((2.0, 0.0, 0.0),)), (((2.0, -3.0 * np.pi / 2.0, 0.0),), ((2.0, 0.0, 0.0),)), (((1.0, np.pi / 2.0, np.pi / 2.0),), ((0.0, 1.0, 0.0),)), (((1.0, 0.0, 0.0),), ((0.0, 0.0, 1.0),)), ) def test_spherical_to_cartesian_coordinates_preset(self, test_inputs, test_outputs): """Tests that spherical_to_cartesian_coordinates behaves as expected.""" self.assert_output_is_correct( math_helpers.spherical_to_cartesian_coordinates, test_inputs, test_outputs) @parameterized.parameters( ((3,),), ((None, 3),), ) def test_spherical_to_cartesian_coordinates_not_raised(self, *shape): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised( math_helpers.spherical_to_cartesian_coordinates, shape) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (1,)),) def test_spherical_to_cartesian_coordinates_raised(self, error_msg, *shape): """Tests that the shape exception is raised.""" self.assert_exception_is_raised( math_helpers.spherical_to_cartesian_coordinates, error_msg, shape) def test_spherical_to_cartesian_coordinates_jacobian_random(self): """Test the Jacobian of the spherical_to_cartesian_coordinates function.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() r_init = np.random.uniform(0.0, 10.0, size=tensor_shape + [1]) theta_init = np.random.uniform( -np.pi / 2.0, np.pi / 2.0, size=tensor_shape + [1]) phi_init = np.random.uniform(-np.pi, np.pi, size=tensor_shape + [1]) data_init = np.stack((r_init, theta_init, phi_init), axis=-1) self.assert_jacobian_is_correct_fn( math_helpers.spherical_to_cartesian_coordinates, [data_init]) @parameterized.parameters( (((0.0, 0.0),), ((1.0, 0.0, 0.0),)), (((1.0, 0.0),), ((1.0, np.pi, 0.0),)), (((0.0, 1.0),), ((1.0, 0.0, 2.0 * np.pi),)), (((1.0, 1.0),), ((1.0, np.pi, 2.0 * np.pi),)), ) def test_square_to_spherical_coordinates_preset(self, test_inputs, test_outputs): """Tests that square_to_spherical_coordinates generates expected results.""" self.assert_output_is_correct(math_helpers.square_to_spherical_coordinates, test_inputs, test_outputs) def test_square_to_spherical_coordinates_jacobian_random(self): """Tests the Jacobian of square_to_spherical_coordinates.""" epsilon = 1e-3 point_2d_init = np.random.uniform(epsilon, 1.0 - epsilon, size=(10, 2)) self.assert_jacobian_is_correct_fn( math_helpers.square_to_spherical_coordinates, [point_2d_init], atol=1e-3) def test_square_to_spherical_coordinates_range_exception_raised(self): """Tests that the exceptions are raised correctly.""" point_2d_below = np.random.uniform(-1.0, -sys.float_info.epsilon, size=(2,)) point_2d_above = np.random.uniform( 1.0 + asserts.select_eps_for_addition(tf.float32), 2.0, size=(2,)) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( math_helpers.square_to_spherical_coordinates(point_2d_below)) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(
math_helpers.square_to_spherical_coordinates(point_2d_above)) @parameterized.parameters( ((2,),), ((None, 2),), ) def test_square_to_spherical_coordinates_shape_exception_not_raised( self, *shape): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised( math_helpers.square_to_spherical_coordinates, shape) @parameterized.parameters( ("must have exactly 2 dimensions in axis -1", (1,)), ("must have exactly 2 dimensions in axis -1", (3,)), ) def test_square_to_spherical_coordinates_shape_exception_raised( self, error_msg, *shape): """Tests that the shape exception is raised.""" self.assert_exception_is_raised( math_helpers.square_to_spherical_coordinates, error_msg, shape) if __name__ == "__main__": test_case.main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for math_helpers.""" import sys from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.math import math_helpers from tensorflow_graphics.util import asserts from tensorflow_graphics.util import test_case class MathTest(test_case.TestCase): @parameterized.parameters( (((0.0, 0.0, 0.0),), ((0.0, np.pi / 2.0, 0.0),)), (((2.0, 0.0, 0.0),), ((2.0, np.pi / 2.0, 0.0),)), (((0.0, 1.0, 0.0),), ((1.0, np.pi / 2.0, np.pi / 2.0),)), (((0.0, 0.0, 1.0),), ((1.0, 0.0, 0.0),)), (((-1.0, 0.0, 0.0),), ((1.0, np.pi / 2.0, np.pi),)), (((0.0, -1.0, 0.0),), ((1.0, np.pi / 2.0, -np.pi / 2.0),)), (((0.0, 0.0, -1.0),), ((1.0, np.pi, 0.0),)), ) def test_cartesian_to_spherical_coordinates_preset(self, test_inputs, test_outputs): """Tests that cartesian_to_spherical_coordinates behaves as expected.""" self.assert_output_is_correct( math_helpers.cartesian_to_spherical_coordinates, test_inputs, test_outputs) @parameterized.parameters( ((3,),), ((None, 3),), ) def test_cartesian_to_spherical_coordinates_not_raised(self, *shape): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised( math_helpers.cartesian_to_spherical_coordinates, shape) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (1,)),) def test_cartesian_to_spherical_coordinates_raised(self, error_msg, *shape): """Tests that the shape exception is raised.""" self.assert_exception_is_raised( math_helpers.cartesian_to_spherical_coordinates, error_msg, shape) def test_cartesian_to_spherical_coordinates_jacobian_random(self): """Test the Jacobian of the cartesian_to_spherical_coordinates function.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() point_init = np.random.uniform(-10.0, 10.0, size=tensor_shape + [3]) self.assert_jacobian_is_correct_fn( math_helpers.cartesian_to_spherical_coordinates, [point_init]) @parameterized.parameters( (((1.0, 1.0, 1.0),),), (((1.0, 0.0, 0.0),),), (((0.0, 1.0, 0.0),),), ) def test_cartesian_to_spherical_coordinates_jacobian_preset(self, cartesian): """Test the Jacobian of the cartesian_to_spherical_coordinates function.""" point_init = np.asarray(cartesian) self.assert_jacobian_is_correct_fn( math_helpers.cartesian_to_spherical_coordinates, [point_init]) @parameterized.parameters( (((1.0, 1.0, 0.0),), ((np.sqrt(2.0), np.pi / 2.0, np.pi / 4.0),)), (((1.0, 0.0, 0.0),), ((1.0, np.pi / 2.0, 0.0),)), (((0.0, 1.0, 0.0),), ((1.0, np.pi / 2.0, np.pi / 2.0),)), (((0.0, 0.0, 1.0),), ((1.0, 0.0, 0.0),)), (((0.0, 0.0, 0.0),), ((0.0, np.pi / 2.0, 0.0),)), ) def test_cartesian_to_spherical_coordinates_values_preset( self, test_inputs, test_outputs): """Tests that cartesian_to_spherical_coordinates returns the expected values.""" self.assert_output_is_correct( math_helpers.cartesian_to_spherical_coordinates, test_inputs, test_outputs) @parameterized.parameters( (((0, 1, 5, 6, 15.0),), ((1, 1, 15, 48,
2027025.0),)),) def test_double_factorial_preset(self, test_inputs, test_outputs): """Tests that double_factorial generates expected results.""" self.assert_output_is_correct(math_helpers.double_factorial, test_inputs, test_outputs) @parameterized.parameters( (((0, 1, 2, 3, 4.0),), ((1, 1, 2, 6, 24.0),)),) def test_factorial_preset(self, test_inputs, test_outputs): """Tests that factorial generates expected results.""" self.assert_output_is_correct(math_helpers.factorial, test_inputs, test_outputs) @parameterized.parameters( (((2.0, np.pi / 2.0, 0.0),), ((2.0, 0.0, 0.0),)), (((2.0, -3.0 * np.pi / 2.0, 0.0),), ((2.0, 0.0, 0.0),)), (((1.0, np.pi / 2.0, np.pi / 2.0),), ((0.0, 1.0, 0.0),)), (((1.0, 0.0, 0.0),), ((0.0, 0.0, 1.0),)), ) def test_spherical_to_cartesian_coordinates_preset(self, test_inputs, test_outputs): """Tests that spherical_to_cartesian_coordinates behaves as expected.""" self.assert_output_is_correct( math_helpers.spherical_to_cartesian_coordinates, test_inputs, test_outputs) @parameterized.parameters( ((3,),), ((None, 3),), ) def test_spherical_to_cartesian_coordinates_not_raised(self, *shape): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised( math_helpers.spherical_to_cartesian_coordinates, shape) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (1,)),) def test_spherical_to_cartesian_coordinates_raised(self, error_msg, *shape): """Tests that the shape exception is raised.""" self.assert_exception_is_raised( math_helpers.spherical_to_cartesian_coordinates, error_msg, shape) def test_spherical_to_cartesian_coordinates_jacobian_random(self): """Test the Jacobian of the spherical_to_cartesian_coordinates function.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() r_init = np.random.uniform(0.0, 10.0, size=tensor_shape + [1]) theta_init = np.random.uniform( -np.pi / 2.0, np.pi / 2.0, size=tensor_shape + [1]) phi_init = np.random.uniform(-np.pi, np.pi, size=tensor_shape + [1]) data_init = np.stack((r_init, theta_init, phi_init), axis=-1) self.assert_jacobian_is_correct_fn( math_helpers.spherical_to_cartesian_coordinates, [data_init]) @parameterized.parameters( (((0.0, 0.0),), ((1.0, 0.0, 0.0),)), (((1.0, 0.0),), ((1.0, np.pi, 0.0),)), (((0.0, 1.0),), ((1.0, 0.0, 2.0 * np.pi),)), (((1.0, 1.0),), ((1.0, np.pi, 2.0 * np.pi),)), ) def test_square_to_spherical_coordinates_preset(self, test_inputs, test_outputs): """Tests that square_to_spherical_coordinates generates expected results.""" self.assert_output_is_correct(math_helpers.square_to_spherical_coordinates, test_inputs, test_outputs) def test_square_to_spherical_coordinates_jacobian_random(self): """Tests the Jacobian of square_to_spherical_coordinates.""" epsilon = 1e-3 point_2d_init = np.random.uniform(epsilon, 1.0 - epsilon, size=(10, 2)) self.assert_jacobian_is_correct_fn( math_helpers.square_to_spherical_coordinates, [point_2d_init], atol=1e-3) def test_square_to_spherical_coordinates_range_exception_raised(self): """Tests that the exceptions are raised correctly.""" point_2d_below = np.random.uniform(-1.0, -sys.float_info.epsilon, size=(2,)) point_2d_above = np.random.uniform( 1.0 + asserts.select_eps_for_addition(tf.float32), 2.0, size=(2,)) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( math_helpers.square_to_spherical_coordinates(point_2d_below)) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(
math_helpers.square_to_spherical_coordinates(point_2d_above)) @parameterized.parameters( ((2,),), ((None, 2),), ) def test_square_to_spherical_coordinates_shape_exception_not_raised( self, *shape): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised( math_helpers.square_to_spherical_coordinates, shape) @parameterized.parameters( ("must have exactly 2 dimensions in axis -1", (1,)), ("must have exactly 2 dimensions in axis -1", (3,)), ) def test_square_to_spherical_coordinates_shape_exception_raised( self, error_msg, *shape): """Tests that the shape exception is raised.""" self.assert_exception_is_raised( math_helpers.square_to_spherical_coordinates, error_msg, shape) if __name__ == "__main__": test_case.main()
-1
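The presets above pin down the spherical convention used throughout: theta is the polar angle measured from +z and phi is the azimuth measured from +x, so (0, 0, 1) maps to (r, theta, phi) = (1, 0, 0). A minimal NumPy sketch of those formulas, written here only to make the convention explicit (it is not the library implementation, which also handles batching, gradients and degenerate inputs):

import numpy as np

def cartesian_to_spherical(xyz):
  x, y, z = xyz
  r = np.sqrt(x * x + y * y + z * z)
  theta = np.arccos(z / r) if r > 0.0 else np.pi / 2.0  # polar angle from +z
  phi = np.arctan2(y, x)  # azimuth from +x
  return np.array((r, theta, phi))

def spherical_to_cartesian(rtp):
  r, theta, phi = rtp
  return r * np.array((np.sin(theta) * np.cos(phi),
                       np.sin(theta) * np.sin(phi),
                       np.cos(theta)))

point = np.array((1.0, 1.0, 0.0))
spherical = cartesian_to_spherical(point)  # (sqrt(2), pi/2, pi/4), as in the preset
np.testing.assert_allclose(spherical, (np.sqrt(2.0), np.pi / 2.0, np.pi / 4.0))
np.testing.assert_allclose(spherical_to_cartesian(spherical), point, atol=1e-12)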
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/datasets/pix3d/pix3d_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for the Pix3D dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tensorflow_datasets.public_api as tfds from tensorflow_graphics.datasets import pix3d class Pix3dTest(tfds.testing.DatasetBuilderTestCase): """Test Cases for Pix3D Dataset implementation.""" DATASET_CLASS = pix3d.Pix3d SPLITS = { 'train': 2, # Number of fake train examples 'test': 1, # Number of fake test examples } DL_EXTRACT_RESULT = '' EXAMPLE_DIR = os.path.join(os.path.dirname(__file__), 'fakes') MOCK_OUT_FORBIDDEN_OS_FUNCTIONS = False # SKIP_CHECKSUMS = True def setUp(self): # pylint: disable=invalid-name super(Pix3dTest, self).setUp() self.builder.TRAIN_SPLIT_IDX = os.path.join(self.EXAMPLE_DIR, 'pix3d_train.npy') self.builder.TEST_SPLIT_IDX = os.path.join(self.EXAMPLE_DIR, 'pix3d_test.npy') if __name__ == '__main__': tfds.testing.test_main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for the Pix3D dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tensorflow_datasets.public_api as tfds from tensorflow_graphics.datasets import pix3d class Pix3dTest(tfds.testing.DatasetBuilderTestCase): """Test Cases for Pix3D Dataset implementation.""" DATASET_CLASS = pix3d.Pix3d SPLITS = { 'train': 2, # Number of fake train examples 'test': 1, # Number of fake test examples } DL_EXTRACT_RESULT = '' EXAMPLE_DIR = os.path.join(os.path.dirname(__file__), 'fakes') MOCK_OUT_FORBIDDEN_OS_FUNCTIONS = False # SKIP_CHECKSUMS = True def setUp(self): # pylint: disable=invalid-name super(Pix3dTest, self).setUp() self.builder.TRAIN_SPLIT_IDX = os.path.join(self.EXAMPLE_DIR, 'pix3d_train.npy') self.builder.TEST_SPLIT_IDX = os.path.join(self.EXAMPLE_DIR, 'pix3d_test.npy') if __name__ == '__main__': tfds.testing.test_main()
-1
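The test above follows the standard TFDS pattern: the builder is exercised against tiny fake examples checked in under fakes/, with SPLITS declaring how many fake records each split should yield. Once the builder passes, it is driven like any other TFDS DatasetBuilder; a sketch, assuming the Pix3D source data is available to the builder (download_and_prepare and as_dataset are the standard DatasetBuilder entry points, but this dataset's download requirements should be checked before relying on this):

from tensorflow_graphics.datasets import pix3d

builder = pix3d.Pix3d()
builder.download_and_prepare()  # may involve a sizeable download
train_ds = builder.as_dataset(split='train')
for example in train_ds.take(1):
  print(sorted(example.keys()))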
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/rendering/camera/tests/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/projects/neural_voxel_renderer/models.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Definition of NVR+ keras model.""" import tensorflow.compat.v1 as tf import tensorflow_graphics.projects.neural_voxel_renderer.layers as layer_utils initializer = tf.keras.initializers.glorot_normal() layers = tf.keras.layers def unet_3x_with_res_in_mid(feat_in, out_filters, norm2d): """Helper function of a Unet with res blocks in the middle.""" e1 = layer_utils.residual_block_2d(feat_in, nfilters=128, strides=(2, 2), normalization=norm2d) # 16x128 e2 = layer_utils.residual_block_2d(e1, nfilters=256, strides=(2, 2), normalization=norm2d) # 8x256 e3 = layer_utils.residual_block_2d(e2, nfilters=512, strides=(2, 2), normalization=norm2d) # 4x512 mid1 = layer_utils.residual_block_2d(e3, nfilters=512, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d mid2 = layer_utils.residual_block_2d(mid1, nfilters=512, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d mid3 = layer_utils.residual_block_2d(mid2, nfilters=512, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d d0 = layer_utils.upconv(mid3, nfilters=256, size=4, strides=1) # 8x256 d1 = layers.concatenate([d0, e2]) # 8x512 d2 = layers.Conv2D(256, kernel_size=4, strides=(1, 1), padding='same', kernel_initializer=initializer)(d1) # 8x256 d3 = layer_utils.upconv(d2, nfilters=128, size=4, strides=1) # 16x128 d4 = layers.concatenate([d3, e1]) # 16x256 d5 = layers.Conv2D(128, kernel_size=4, strides=(1, 1), padding='same', kernel_initializer=initializer)(d4) # 8x256 d6 = layer_utils.upconv(d5, nfilters=64, size=4, strides=1) # 32x64 d7 = layers.concatenate([d6, feat_in]) # 32xN d8 = layers.Conv2D(out_filters, kernel_size=4, strides=(1, 1), padding='same', kernel_initializer=initializer)(d7) # 32xout return d8 def neural_voxel_renderer_plus(voxels, rerendering, light_pos, size=4, norm2d='batchnorm', norm3d='batchnorm'): """Neural Voxel Renderer + keras model.""" with tf.name_scope('Network/'): voxels = layers.Input(tensor=voxels) rerendering = layers.Input(tensor=rerendering) light_pos = layers.Input(tensor=light_pos) nf_2d = 512 with tf.name_scope('VoxelProcessing'): vol0_a = layer_utils.conv_block_3d(voxels, nfilters=16, size=size, strides=2, normalization=norm3d) # 64x64x64x16 vol0_b = layer_utils.conv_block_3d(vol0_a, nfilters=16, size=size, strides=1, normalization=norm3d) # 64x64x64x16 vol1_a = layer_utils.conv_block_3d(vol0_b, nfilters=16, size=size, strides=2, normalization=norm3d) # 32x32x32x16 vol1_b = layer_utils.conv_block_3d(vol1_a, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 vol1_c = layer_utils.conv_block_3d(vol1_b, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 shortcut = vol1_c vol_a1 = layer_utils.residual_block_3d(vol1_c, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a2 = layer_utils.residual_block_3d(vol_a1, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a3 = layer_utils.residual_block_3d(vol_a2, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a4 = 
layer_utils.residual_block_3d(vol_a3, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a5 = layer_utils.residual_block_3d(vol_a4, 32, strides=(1, 1, 1), normalization=norm3d) # 32x encoded_vol = layers.add([shortcut, vol_a5]) encoded_vol = layers.Reshape([32, 32, 32*32])(encoded_vol) encoded_vol = layers.Conv2D(nf_2d, kernel_size=1, strides=(1, 1), padding='same', kernel_initializer=initializer)(encoded_vol) latent_projection = layers.LeakyReLU()(encoded_vol) # 32x32x512 with tf.name_scope('ProjectionProcessing'): shortcut = latent_projection # 32x32xnf_2d e1 = layer_utils.residual_block_2d(latent_projection, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e2 = layer_utils.residual_block_2d(e1, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e3 = layer_utils.residual_block_2d(e2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e4 = layer_utils.residual_block_2d(e3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e5 = layer_utils.residual_block_2d(e4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d encoded_proj = layers.add([shortcut, e5]) # 32x32xnf_2d with tf.name_scope('LightProcessing'): fc_light = layers.Dense(64, kernel_initializer=initializer)(light_pos) light_code = layers.Dense(64, kernel_initializer=initializer)(fc_light) light_code = \ layers.Lambda(lambda v: tf.tile(v[0], [1, 32*32]))([light_code]) light_code = layers.Reshape((32, 32, 64))(light_code) # 32x32x64 with tf.name_scope('Merger'): latent_code_final = layers.concatenate([encoded_proj, light_code]) latent_code_final = layer_utils.conv_block_2d(latent_code_final, nfilters=nf_2d, size=size, strides=1, normalization=norm3d) shortcut = latent_code_final m1 = layer_utils.residual_block_2d(latent_code_final, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m2 = layer_utils.residual_block_2d(m1, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m3 = layer_utils.residual_block_2d(m2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m4 = layer_utils.residual_block_2d(m3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m5 = layer_utils.residual_block_2d(m4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d latent_code_final2 = layers.add([shortcut, m5]) # 32x32xnf_2d with tf.name_scope('Decoder'): d7 = layer_utils.conv_t_block_2d(latent_code_final2, nfilters=128, size=size, strides=2, normalization=norm2d) # 64x64x128 d7 = layer_utils.conv_block_2d(d7, nfilters=128, size=size, strides=1, normalization=norm2d) # 64x64x128 d8 = layer_utils.conv_t_block_2d(d7, nfilters=64, size=size, strides=2, normalization=norm2d) # 128x128x64 d8 = layer_utils.conv_block_2d(d8, nfilters=64, size=size, strides=1, normalization=norm2d) # 128x128x64 d9 = layer_utils.conv_t_block_2d(d8, nfilters=32, size=size, strides=2, normalization=norm2d) # 256x256x32 d9 = layer_utils.conv_block_2d(d9, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x256x32 rendered_image = layers.Conv2D(32, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(d9) # 256x256x3 with tf.name_scope('ImageProcessingNetwork'): ec1 = layer_utils.conv_block_2d(rerendering, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x ec2 = layer_utils.conv_block_2d(ec1, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x with tf.name_scope('NeuralRerenderingNetwork'): latent_img = layers.add([rendered_image, ec2]) target_code = 
unet_3x_with_res_in_mid(latent_img, 32, norm2d=norm2d) out0 = layer_utils.conv_block_2d(target_code, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x predicted_image = layers.Conv2D(3, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(out0) # 256x256x3 return tf.keras.Model(inputs=[voxels, rerendering, light_pos], outputs=[predicted_image]) def neural_voxel_renderer_plus_tf2(size=4, norm2d='batchnorm', norm3d='batchnorm'): """Neural Voxel Renderer + keras model for tf2.""" with tf.name_scope('Network/'): voxels = layers.Input(shape=[128, 128, 128, 4]) rerendering = layers.Input(shape=[256, 256, 3]) light_pos = layers.Input(shape=[3]) nf_2d = 512 with tf.name_scope('VoxelProcessing'): vol0_a = layer_utils.conv_block_3d(voxels, nfilters=16, size=size, strides=2, normalization=norm3d) # 64x64x64x16 vol0_b = layer_utils.conv_block_3d(vol0_a, nfilters=16, size=size, strides=1, normalization=norm3d) # 64x64x64x16 vol1_a = layer_utils.conv_block_3d(vol0_b, nfilters=16, size=size, strides=2, normalization=norm3d) # 32x32x32x16 vol1_b = layer_utils.conv_block_3d(vol1_a, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 vol1_c = layer_utils.conv_block_3d(vol1_b, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 shortcut = vol1_c vol_a1 = layer_utils.residual_block_3d(vol1_c, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a2 = layer_utils.residual_block_3d(vol_a1, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a3 = layer_utils.residual_block_3d(vol_a2, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a4 = layer_utils.residual_block_3d(vol_a3, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a5 = layer_utils.residual_block_3d(vol_a4, 32, strides=(1, 1, 1), normalization=norm3d) # 32x encoded_vol = layers.add([shortcut, vol_a5]) encoded_vol = layers.Reshape([32, 32, 32*32])(encoded_vol) encoded_vol = layers.Conv2D(nf_2d, kernel_size=1, strides=(1, 1), padding='same', kernel_initializer=initializer)(encoded_vol) latent_projection = layers.LeakyReLU()(encoded_vol) # 32x32x512 with tf.name_scope('ProjectionProcessing'): shortcut = latent_projection # 32x32xnf_2d e1 = layer_utils.residual_block_2d(latent_projection, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e2 = layer_utils.residual_block_2d(e1, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e3 = layer_utils.residual_block_2d(e2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e4 = layer_utils.residual_block_2d(e3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e5 = layer_utils.residual_block_2d(e4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d encoded_proj = layers.add([shortcut, e5]) # 32x32xnf_2d with tf.name_scope('LightProcessing'): fc_light = layers.Dense(64, kernel_initializer=initializer)(light_pos) light_code = layers.Dense(64, kernel_initializer=initializer)(fc_light) light_code = \ layers.Lambda(lambda v: tf.tile(v[0], [1, 32*32]))([light_code]) light_code = layers.Reshape((32, 32, 64))(light_code) # 32x32x64 with tf.name_scope('Merger'): latent_code_final = layers.concatenate([encoded_proj, light_code]) latent_code_final = layer_utils.conv_block_2d(latent_code_final, nfilters=nf_2d, size=size, strides=1, normalization=norm3d) shortcut = latent_code_final m1 = layer_utils.residual_block_2d(latent_code_final, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m2 = layer_utils.residual_block_2d(m1, 
nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m3 = layer_utils.residual_block_2d(m2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m4 = layer_utils.residual_block_2d(m3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m5 = layer_utils.residual_block_2d(m4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d latent_code_final2 = layers.add([shortcut, m5]) # 32x32xnf_2d with tf.name_scope('Decoder'): d7 = layer_utils.conv_t_block_2d(latent_code_final2, nfilters=128, size=size, strides=2, normalization=norm2d) # 64x64x128 d7 = layer_utils.conv_block_2d(d7, nfilters=128, size=size, strides=1, normalization=norm2d) # 64x64x128 d8 = layer_utils.conv_t_block_2d(d7, nfilters=64, size=size, strides=2, normalization=norm2d) # 128x128x64 d8 = layer_utils.conv_block_2d(d8, nfilters=64, size=size, strides=1, normalization=norm2d) # 128x128x64 d9 = layer_utils.conv_t_block_2d(d8, nfilters=32, size=size, strides=2, normalization=norm2d) # 256x256x32 d9 = layer_utils.conv_block_2d(d9, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x256x32 rendered_image = layers.Conv2D(32, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(d9) # 256x256x3 with tf.name_scope('ImageProcessingNetwork'): ec1 = layer_utils.conv_block_2d(rerendering, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x ec2 = layer_utils.conv_block_2d(ec1, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x with tf.name_scope('NeuralRerenderingNetwork'): latent_img = layers.add([rendered_image, ec2]) target_code = unet_3x_with_res_in_mid(latent_img, 32, norm2d=norm2d) out0 = layer_utils.conv_block_2d(target_code, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x predicted_image = layers.Conv2D(3, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(out0) # 256x256x3 return tf.keras.Model(inputs=[voxels, rerendering, light_pos], outputs=[predicted_image])
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Definition of NVR+ keras model.""" import tensorflow.compat.v1 as tf import tensorflow_graphics.projects.neural_voxel_renderer.layers as layer_utils initializer = tf.keras.initializers.glorot_normal() layers = tf.keras.layers def unet_3x_with_res_in_mid(feat_in, out_filters, norm2d): """Helper function of a Unet with res blocks in the middle.""" e1 = layer_utils.residual_block_2d(feat_in, nfilters=128, strides=(2, 2), normalization=norm2d) # 16x128 e2 = layer_utils.residual_block_2d(e1, nfilters=256, strides=(2, 2), normalization=norm2d) # 8x256 e3 = layer_utils.residual_block_2d(e2, nfilters=512, strides=(2, 2), normalization=norm2d) # 4x512 mid1 = layer_utils.residual_block_2d(e3, nfilters=512, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d mid2 = layer_utils.residual_block_2d(mid1, nfilters=512, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d mid3 = layer_utils.residual_block_2d(mid2, nfilters=512, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d d0 = layer_utils.upconv(mid3, nfilters=256, size=4, strides=1) # 8x256 d1 = layers.concatenate([d0, e2]) # 8x512 d2 = layers.Conv2D(256, kernel_size=4, strides=(1, 1), padding='same', kernel_initializer=initializer)(d1) # 8x256 d3 = layer_utils.upconv(d2, nfilters=128, size=4, strides=1) # 16x128 d4 = layers.concatenate([d3, e1]) # 16x256 d5 = layers.Conv2D(128, kernel_size=4, strides=(1, 1), padding='same', kernel_initializer=initializer)(d4) # 8x256 d6 = layer_utils.upconv(d5, nfilters=64, size=4, strides=1) # 32x64 d7 = layers.concatenate([d6, feat_in]) # 32xN d8 = layers.Conv2D(out_filters, kernel_size=4, strides=(1, 1), padding='same', kernel_initializer=initializer)(d7) # 32xout return d8 def neural_voxel_renderer_plus(voxels, rerendering, light_pos, size=4, norm2d='batchnorm', norm3d='batchnorm'): """Neural Voxel Renderer + keras model.""" with tf.name_scope('Network/'): voxels = layers.Input(tensor=voxels) rerendering = layers.Input(tensor=rerendering) light_pos = layers.Input(tensor=light_pos) nf_2d = 512 with tf.name_scope('VoxelProcessing'): vol0_a = layer_utils.conv_block_3d(voxels, nfilters=16, size=size, strides=2, normalization=norm3d) # 64x64x64x16 vol0_b = layer_utils.conv_block_3d(vol0_a, nfilters=16, size=size, strides=1, normalization=norm3d) # 64x64x64x16 vol1_a = layer_utils.conv_block_3d(vol0_b, nfilters=16, size=size, strides=2, normalization=norm3d) # 32x32x32x16 vol1_b = layer_utils.conv_block_3d(vol1_a, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 vol1_c = layer_utils.conv_block_3d(vol1_b, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 shortcut = vol1_c vol_a1 = layer_utils.residual_block_3d(vol1_c, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a2 = layer_utils.residual_block_3d(vol_a1, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a3 = layer_utils.residual_block_3d(vol_a2, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a4 = 
layer_utils.residual_block_3d(vol_a3, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a5 = layer_utils.residual_block_3d(vol_a4, 32, strides=(1, 1, 1), normalization=norm3d) # 32x encoded_vol = layers.add([shortcut, vol_a5]) encoded_vol = layers.Reshape([32, 32, 32*32])(encoded_vol) encoded_vol = layers.Conv2D(nf_2d, kernel_size=1, strides=(1, 1), padding='same', kernel_initializer=initializer)(encoded_vol) latent_projection = layers.LeakyReLU()(encoded_vol) # 32x32x512 with tf.name_scope('ProjectionProcessing'): shortcut = latent_projection # 32x32xnf_2d e1 = layer_utils.residual_block_2d(latent_projection, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e2 = layer_utils.residual_block_2d(e1, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e3 = layer_utils.residual_block_2d(e2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e4 = layer_utils.residual_block_2d(e3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e5 = layer_utils.residual_block_2d(e4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d encoded_proj = layers.add([shortcut, e5]) # 32x32xnf_2d with tf.name_scope('LightProcessing'): fc_light = layers.Dense(64, kernel_initializer=initializer)(light_pos) light_code = layers.Dense(64, kernel_initializer=initializer)(fc_light) light_code = \ layers.Lambda(lambda v: tf.tile(v[0], [1, 32*32]))([light_code]) light_code = layers.Reshape((32, 32, 64))(light_code) # 32x32x64 with tf.name_scope('Merger'): latent_code_final = layers.concatenate([encoded_proj, light_code]) latent_code_final = layer_utils.conv_block_2d(latent_code_final, nfilters=nf_2d, size=size, strides=1, normalization=norm3d) shortcut = latent_code_final m1 = layer_utils.residual_block_2d(latent_code_final, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m2 = layer_utils.residual_block_2d(m1, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m3 = layer_utils.residual_block_2d(m2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m4 = layer_utils.residual_block_2d(m3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m5 = layer_utils.residual_block_2d(m4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d latent_code_final2 = layers.add([shortcut, m5]) # 32x32xnf_2d with tf.name_scope('Decoder'): d7 = layer_utils.conv_t_block_2d(latent_code_final2, nfilters=128, size=size, strides=2, normalization=norm2d) # 64x64x128 d7 = layer_utils.conv_block_2d(d7, nfilters=128, size=size, strides=1, normalization=norm2d) # 64x64x128 d8 = layer_utils.conv_t_block_2d(d7, nfilters=64, size=size, strides=2, normalization=norm2d) # 128x128x64 d8 = layer_utils.conv_block_2d(d8, nfilters=64, size=size, strides=1, normalization=norm2d) # 128x128x64 d9 = layer_utils.conv_t_block_2d(d8, nfilters=32, size=size, strides=2, normalization=norm2d) # 256x256x32 d9 = layer_utils.conv_block_2d(d9, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x256x32 rendered_image = layers.Conv2D(32, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(d9) # 256x256x3 with tf.name_scope('ImageProcessingNetwork'): ec1 = layer_utils.conv_block_2d(rerendering, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x ec2 = layer_utils.conv_block_2d(ec1, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x with tf.name_scope('NeuralRerenderingNetwork'): latent_img = layers.add([rendered_image, ec2]) target_code = 
unet_3x_with_res_in_mid(latent_img, 32, norm2d=norm2d) out0 = layer_utils.conv_block_2d(target_code, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x predicted_image = layers.Conv2D(3, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(out0) # 256x256x3 return tf.keras.Model(inputs=[voxels, rerendering, light_pos], outputs=[predicted_image]) def neural_voxel_renderer_plus_tf2(size=4, norm2d='batchnorm', norm3d='batchnorm'): """Neural Voxel Renderer + keras model for tf2.""" with tf.name_scope('Network/'): voxels = layers.Input(shape=[128, 128, 128, 4]) rerendering = layers.Input(shape=[256, 256, 3]) light_pos = layers.Input(shape=[3]) nf_2d = 512 with tf.name_scope('VoxelProcessing'): vol0_a = layer_utils.conv_block_3d(voxels, nfilters=16, size=size, strides=2, normalization=norm3d) # 64x64x64x16 vol0_b = layer_utils.conv_block_3d(vol0_a, nfilters=16, size=size, strides=1, normalization=norm3d) # 64x64x64x16 vol1_a = layer_utils.conv_block_3d(vol0_b, nfilters=16, size=size, strides=2, normalization=norm3d) # 32x32x32x16 vol1_b = layer_utils.conv_block_3d(vol1_a, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 vol1_c = layer_utils.conv_block_3d(vol1_b, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 shortcut = vol1_c vol_a1 = layer_utils.residual_block_3d(vol1_c, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a2 = layer_utils.residual_block_3d(vol_a1, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a3 = layer_utils.residual_block_3d(vol_a2, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a4 = layer_utils.residual_block_3d(vol_a3, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a5 = layer_utils.residual_block_3d(vol_a4, 32, strides=(1, 1, 1), normalization=norm3d) # 32x encoded_vol = layers.add([shortcut, vol_a5]) encoded_vol = layers.Reshape([32, 32, 32*32])(encoded_vol) encoded_vol = layers.Conv2D(nf_2d, kernel_size=1, strides=(1, 1), padding='same', kernel_initializer=initializer)(encoded_vol) latent_projection = layers.LeakyReLU()(encoded_vol) # 32x32x512 with tf.name_scope('ProjectionProcessing'): shortcut = latent_projection # 32x32xnf_2d e1 = layer_utils.residual_block_2d(latent_projection, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e2 = layer_utils.residual_block_2d(e1, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e3 = layer_utils.residual_block_2d(e2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e4 = layer_utils.residual_block_2d(e3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e5 = layer_utils.residual_block_2d(e4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d encoded_proj = layers.add([shortcut, e5]) # 32x32xnf_2d with tf.name_scope('LightProcessing'): fc_light = layers.Dense(64, kernel_initializer=initializer)(light_pos) light_code = layers.Dense(64, kernel_initializer=initializer)(fc_light) light_code = \ layers.Lambda(lambda v: tf.tile(v[0], [1, 32*32]))([light_code]) light_code = layers.Reshape((32, 32, 64))(light_code) # 32x32x64 with tf.name_scope('Merger'): latent_code_final = layers.concatenate([encoded_proj, light_code]) latent_code_final = layer_utils.conv_block_2d(latent_code_final, nfilters=nf_2d, size=size, strides=1, normalization=norm3d) shortcut = latent_code_final m1 = layer_utils.residual_block_2d(latent_code_final, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m2 = layer_utils.residual_block_2d(m1, 
nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m3 = layer_utils.residual_block_2d(m2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m4 = layer_utils.residual_block_2d(m3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m5 = layer_utils.residual_block_2d(m4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d latent_code_final2 = layers.add([shortcut, m5]) # 32x32xnf_2d with tf.name_scope('Decoder'): d7 = layer_utils.conv_t_block_2d(latent_code_final2, nfilters=128, size=size, strides=2, normalization=norm2d) # 64x64x128 d7 = layer_utils.conv_block_2d(d7, nfilters=128, size=size, strides=1, normalization=norm2d) # 64x64x128 d8 = layer_utils.conv_t_block_2d(d7, nfilters=64, size=size, strides=2, normalization=norm2d) # 128x128x64 d8 = layer_utils.conv_block_2d(d8, nfilters=64, size=size, strides=1, normalization=norm2d) # 128x128x64 d9 = layer_utils.conv_t_block_2d(d8, nfilters=32, size=size, strides=2, normalization=norm2d) # 256x256x32 d9 = layer_utils.conv_block_2d(d9, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x256x32 rendered_image = layers.Conv2D(32, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(d9) # 256x256x3 with tf.name_scope('ImageProcessingNetwork'): ec1 = layer_utils.conv_block_2d(rerendering, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x ec2 = layer_utils.conv_block_2d(ec1, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x with tf.name_scope('NeuralRerenderingNetwork'): latent_img = layers.add([rendered_image, ec2]) target_code = unet_3x_with_res_in_mid(latent_img, 32, norm2d=norm2d) out0 = layer_utils.conv_block_2d(target_code, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x predicted_image = layers.Conv2D(3, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(out0) # 256x256x3 return tf.keras.Model(inputs=[voxels, rerendering, light_pos], outputs=[predicted_image])
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/rendering/reflectance/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Reflectance module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_graphics.rendering.reflectance import blinn_phong from tensorflow_graphics.rendering.reflectance import lambertian from tensorflow_graphics.rendering.reflectance import phong from tensorflow_graphics.util import export_api as _export_api # API contains submodules of tensorflow_graphics.rendering.reflectance. __all__ = _export_api.get_modules()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Reflectance module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_graphics.rendering.reflectance import blinn_phong from tensorflow_graphics.rendering.reflectance import lambertian from tensorflow_graphics.rendering.reflectance import phong from tensorflow_graphics.util import export_api as _export_api # API contains submodules of tensorflow_graphics.rendering.reflectance. __all__ = _export_api.get_modules()
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/nn/metric/tests/fscore_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the fscore metric.""" from absl.testing import parameterized import numpy as np from tensorflow_graphics.nn.metric import fscore from tensorflow_graphics.nn.metric import precision from tensorflow_graphics.nn.metric import recall from tensorflow_graphics.util import test_case def random_tensor(tensor_shape): return np.random.uniform(low=0.0, high=1.0, size=tensor_shape) def random_tensor_shape(): tensor_size = np.random.randint(5) + 1 return np.random.randint(1, 10, size=(tensor_size)).tolist() def binary_precision_function(ground_truth, predictions): return precision.evaluate(ground_truth, predictions, classes=[1]) def binary_recall_function(ground_truth, predictions): return recall.evaluate(ground_truth, predictions, classes=[1]) class FscoreTest(test_case.TestCase): @parameterized.parameters( # Precision = 0.5, Recall = 0.25. ((0, 1, 1, 1, 1), (1, 1, 0, 0, 0), 2 * (0.5 * 0.25) / (0.5 + 0.25)), # Precision = 1, Recall = 1. ((0, 0, 0, 1, 1, 1, 0, 1), (0, 0, 0, 1, 1, 1, 0, 1), 1), # Precision = 0, Recall = 0. ((0, 1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0)) def test_evaluate_preset(self, ground_truth, predictions, expected_fscore): tensor_shape = random_tensor_shape() ground_truth_labels = np.tile(ground_truth, tensor_shape + [1]) predicted_labels = np.tile(predictions, tensor_shape + [1]) expected = np.tile(expected_fscore, tensor_shape) result = fscore.evaluate( ground_truth_labels, predicted_labels, precision_function=binary_precision_function, recall_function=binary_recall_function) self.assertAllClose(expected, result) @parameterized.parameters( ("Not all batch dimensions are broadcast-compatible.", (1, 5, 3), (4, 3)), ("Not all batch dimensions are broadcast-compatible.", (3, 4), (2, 4, 5)), ) def test_evaluate_shape_exception_raised(self, error_msg, *shape): """Tests that the shape exception is raised.""" self.assert_exception_is_raised(fscore.evaluate, error_msg, shape) @parameterized.parameters( ((1, 5, 3), (2, 5, 1)), ((None, 2, 6), (4, 2, None)), ((3, 1, 1, 2), (3, 5, 8, 2)), ) def test_evaluate_shape_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(fscore.evaluate, shapes) if __name__ == "__main__": test_case.main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the fscore metric.""" from absl.testing import parameterized import numpy as np from tensorflow_graphics.nn.metric import fscore from tensorflow_graphics.nn.metric import precision from tensorflow_graphics.nn.metric import recall from tensorflow_graphics.util import test_case def random_tensor(tensor_shape): return np.random.uniform(low=0.0, high=1.0, size=tensor_shape) def random_tensor_shape(): tensor_size = np.random.randint(5) + 1 return np.random.randint(1, 10, size=(tensor_size)).tolist() def binary_precision_function(ground_truth, predictions): return precision.evaluate(ground_truth, predictions, classes=[1]) def binary_recall_function(ground_truth, predictions): return recall.evaluate(ground_truth, predictions, classes=[1]) class FscoreTest(test_case.TestCase): @parameterized.parameters( # Precision = 0.5, Recall = 0.25. ((0, 1, 1, 1, 1), (1, 1, 0, 0, 0), 2 * (0.5 * 0.25) / (0.5 + 0.25)), # Precision = 1, Recall = 1. ((0, 0, 0, 1, 1, 1, 0, 1), (0, 0, 0, 1, 1, 1, 0, 1), 1), # Precision = 0, Recall = 0. ((0, 1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0)) def test_evaluate_preset(self, ground_truth, predictions, expected_fscore): tensor_shape = random_tensor_shape() ground_truth_labels = np.tile(ground_truth, tensor_shape + [1]) predicted_labels = np.tile(predictions, tensor_shape + [1]) expected = np.tile(expected_fscore, tensor_shape) result = fscore.evaluate( ground_truth_labels, predicted_labels, precision_function=binary_precision_function, recall_function=binary_recall_function) self.assertAllClose(expected, result) @parameterized.parameters( ("Not all batch dimensions are broadcast-compatible.", (1, 5, 3), (4, 3)), ("Not all batch dimensions are broadcast-compatible.", (3, 4), (2, 4, 5)), ) def test_evaluate_shape_exception_raised(self, error_msg, *shape): """Tests that the shape exception is raised.""" self.assert_exception_is_raised(fscore.evaluate, error_msg, shape) @parameterized.parameters( ((1, 5, 3), (2, 5, 1)), ((None, 2, 6), (4, 2, None)), ((3, 1, 1, 2), (3, 5, 8, 2)), ) def test_evaluate_shape_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(fscore.evaluate, shapes) if __name__ == "__main__": test_case.main()
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/nn/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Neural Network module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=g-import-not-at-top from tensorflow_graphics.util.doc import _import_tfg_docs if _import_tfg_docs(): from tensorflow_graphics.nn import layer from tensorflow_graphics.nn import loss from tensorflow_graphics.nn import metric from tensorflow_graphics.util import export_api as _export_api # API contains submodules of tensorflow_graphics.nn. __all__ = _export_api.get_modules() # pylint: enable=g-import-not-at-top
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Neural Network module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=g-import-not-at-top from tensorflow_graphics.util.doc import _import_tfg_docs if _import_tfg_docs(): from tensorflow_graphics.nn import layer from tensorflow_graphics.nn import loss from tensorflow_graphics.nn import metric from tensorflow_graphics.util import export_api as _export_api # API contains submodules of tensorflow_graphics.nn. __all__ = _export_api.get_modules() # pylint: enable=g-import-not-at-top
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/rendering/kernels/rasterize_triangles_impl.cc
/* Copyright 2020 The TensorFlow Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "py/tensorflow_graphics/rendering/kernels/rasterize_triangles_impl.h" #include <algorithm> #include <cmath> namespace { using fixed_t = int64; // Converts to fixed point with 16 fractional bits and 48 integer bits. // TODO(fcole): fixed-point depth may be too shallow. // The algorithm requires multiplying two of the xyzw clip-space coordinates // together, summing, and then multiplying by an NDC pixel coordinate (three // total multiplies). After three multiplications, the fractional part will be // 48 bits, leaving 16 bits for the integer part. The NDC pixel coordinates // are in (-1,1) so they need only 1 integer bit, so as long as the values of // the inverse matrix are < 2^15, the fixed-point math should not overflow. This // seems a bit dicey but so far all the tests I've tried pass. constexpr int kFractionalBits = 16; constexpr fixed_t ShiftPointLeft(fixed_t x) { return x << kFractionalBits; } constexpr fixed_t ToFixedPoint(float f) { return static_cast<fixed_t>(f * ShiftPointLeft(1)); } // Takes the minimum of a and b, rounds down, and converts to an integer in // the range [low, high]. inline int ClampedIntegerMin(float a, float b, int low, int high) { const float value = std::floor(std::min(a, b)); return static_cast<int>( std::clamp(value, static_cast<float>(low), static_cast<float>(high))); } // Takes the maximum of a and b, rounds up, and converts to an integer in the // range [low, high]. inline int ClampedIntegerMax(float a, float b, int low, int high) { const float value = std::ceil(std::max(a, b)); return static_cast<int>( std::clamp(value, static_cast<float>(low), static_cast<float>(high))); } // Return true if the near plane is between the eye and the clip-space point // with the provided z and w. inline bool IsClipPointVisible(float z, float w) { return w > 0 && z >= -w; } // Computes the screen-space bounding box of the given clip-space triangle and // stores it into [left, right, bottom, top], where left and bottom limits are // inclusive while right and top are not. // Returns true if the bounding box includes any screen pixels. bool ComputeTriangleBoundingBox(float v0x, float v0y, float v0z, float v0w, float v1x, float v1y, float v1z, float v1w, float v2x, float v2y, float v2z, float v2w, int image_width, int image_height, int* left, int* right, int* bottom, int* top) { // If the triangle is entirely visible, project the vertices to pixel // coordinates and find the triangle bounding box enlarged to the nearest // integer and clamped to the image boundaries. If the triangle is not // entirely visible, intersect the edges that cross the near plane with the // near plane and use those to compute screen bounds instead. 
*left = image_width; *right = 0; *bottom = image_height; *top = 0; auto add_point = [&](float x, float y, float w) { const float px = 0.5f * (x / w + 1) * image_width; const float py = 0.5f * (y / w + 1) * image_height; *left = ClampedIntegerMin(*left, px, 0, image_width); *right = ClampedIntegerMax(*right, px, 0, image_width); *bottom = ClampedIntegerMin(*bottom, py, 0, image_height); *top = ClampedIntegerMax(*top, py, 0, image_height); }; auto add_near_point = [&](float x0, float y0, float z0, float w0, float x1, float y1, float z1, float w1) { const float denom = z0 - z1 + w0 - w1; if (denom != 0) { // Interpolate to near plane, where z/w == -1. const float t = (z0 + w0) / denom; const float x = x0 + t * (x1 - x0); const float y = y0 + t * (y1 - y0); const float w = w0 + t * (w1 - w0); add_point(x, y, w); } }; const bool visible_v0 = IsClipPointVisible(v0z, v0w); const bool visible_v1 = IsClipPointVisible(v1z, v1w); const bool visible_v2 = IsClipPointVisible(v2z, v2w); if (visible_v0) { add_point(v0x, v0y, v0w); if (!visible_v1) add_near_point(v0x, v0y, v0z, v0w, v1x, v1y, v1z, v1w); if (!visible_v2) add_near_point(v0x, v0y, v0z, v0w, v2x, v2y, v2z, v2w); } if (visible_v1) { add_point(v1x, v1y, v1w); if (!visible_v2) add_near_point(v1x, v1y, v1z, v1w, v2x, v2y, v2z, v2w); if (!visible_v0) add_near_point(v1x, v1y, v1z, v1w, v0x, v0y, v0z, v0w); } if (visible_v2) { add_point(v2x, v2y, v2w); if (!visible_v0) add_near_point(v2x, v2y, v2z, v2w, v0x, v0y, v0z, v0w); if (!visible_v1) add_near_point(v2x, v2y, v2z, v2w, v1x, v1y, v1z, v1w); } const bool is_valid = (*right > *left) && (*top > *bottom); return is_valid; } // Computes a 3x3 matrix inverse without dividing by the determinant. // Instead, makes an unnormalized matrix inverse with the correct sign // by flipping the sign of the matrix if the determinant is negative. // By leaving out determinant division, the rows of M^-1 only depend on two out // of three of the columns of M; i.e., the first row of M^-1 only depends on the // second and third columns of M, the second only depends on the first and // third, etc. This means we can compute edge functions for two neighboring // triangles independently and produce exactly the same numerical result up to // the sign. // See http://mathworld.wolfram.com/MatrixInverse.html // Culling is accomplished by inspecting the sign of the determinant as in: // "Incremental and Hierarchical Hilbert Order Edge Equation Polygon // Rasterization," McCool, et al., 2001 void ComputeUnnormalizedMatrixInverse( const fixed_t a11, const fixed_t a12, const fixed_t a13, const fixed_t a21, const fixed_t a22, const fixed_t a23, const fixed_t a31, const fixed_t a32, const fixed_t a33, const FaceCullingMode culling_mode, fixed_t m_inv[9]) { m_inv[0] = a22 * a33 - a32 * a23; m_inv[1] = a13 * a32 - a33 * a12; m_inv[2] = a12 * a23 - a22 * a13; m_inv[3] = a23 * a31 - a33 * a21; m_inv[4] = a11 * a33 - a31 * a13; m_inv[5] = a13 * a21 - a23 * a11; m_inv[6] = a21 * a32 - a31 * a22; m_inv[7] = a12 * a31 - a32 * a11; m_inv[8] = a11 * a22 - a21 * a12; // If the culling mode is kBack, leave the sign of the matrix unchanged. // Transfer the sign of the determinant if mode is kNone. If mode is kFront, // just invert the matrix. if (culling_mode == FaceCullingMode::kNone || culling_mode == FaceCullingMode::kFront) { // The first column of the unnormalized M^-1 contains intermediate values // for det(M). 
const float det = a11 * m_inv[0] + a12 * m_inv[3] + a13 * m_inv[6]; const float multiplier = (culling_mode == FaceCullingMode::kNone) ? std::copysign(1.0, det) : -1.0; for (int i = 0; i < 9; ++i) { m_inv[i] *= multiplier; } } } // Computes the edge functions from M^-1 as described by Olano and Greer, // "Triangle Scan Conversion using 2D Homogeneous Coordinates." // // This function combines equations (3) and (4). It first computes // [a b c] = u_i * M^-1, where u_0 = [1 0 0], u_1 = [0 1 0], etc., // then computes edge_i = aX + bY + c void ComputeEdgeFunctions(const float px, const float py, const fixed_t m_inv[9], fixed_t values[3]) { const fixed_t px_i = ToFixedPoint(px); const fixed_t py_i = ToFixedPoint(py); for (int i = 0; i < 3; ++i) { const fixed_t a = m_inv[3 * i + 0]; const fixed_t b = m_inv[3 * i + 1]; const fixed_t c = m_inv[3 * i + 2]; // Before summing, shift the point of c to align with the products of // multiplication. values[i] = a * px_i + b * py_i + ShiftPointLeft(c); } } // Determines whether the point p lies inside a triangle. Counts pixels exactly // on an edge as inside the triangle, as long as the triangle is not degenerate. // Degenerate (zero-area) triangles always fail the inside test. bool PixelIsInsideTriangle(const fixed_t edge_values[3]) { // Check that the edge values are all non-negative and that at least one is // positive (triangle is non-degenerate). return (edge_values[0] >= 0 && edge_values[1] >= 0 && edge_values[2] >= 0) && (edge_values[0] > 0 || edge_values[1] > 0 || edge_values[2] > 0); } } // namespace void RasterizeTrianglesImpl(const float* vertices, const int32* triangles, int32 triangle_count, int32 image_width, int32 image_height, int32 num_layers, FaceCullingMode face_culling_mode, int32* triangle_ids, float* z_buffer, float* barycentric_coordinates) { const float half_image_width = 0.5f * image_width; const float half_image_height = 0.5f * image_height; fixed_t unnormalized_matrix_inverse[9]; fixed_t b_over_w[3]; int left, right, bottom, top; for (int32 triangle_id = 0; triangle_id < triangle_count; ++triangle_id) { const int32 v0_x_id = 4 * triangles[3 * triangle_id]; const int32 v1_x_id = 4 * triangles[3 * triangle_id + 1]; const int32 v2_x_id = 4 * triangles[3 * triangle_id + 2]; const float v0x = vertices[v0_x_id]; const float v0y = vertices[v0_x_id + 1]; const float v0z = vertices[v0_x_id + 2]; const float v0w = vertices[v0_x_id + 3]; const float v1x = vertices[v1_x_id]; const float v1y = vertices[v1_x_id + 1]; const float v1z = vertices[v1_x_id + 2]; const float v1w = vertices[v1_x_id + 3]; const float v2x = vertices[v2_x_id]; const float v2y = vertices[v2_x_id + 1]; const float v2z = vertices[v2_x_id + 2]; const float v2w = vertices[v2_x_id + 3]; const bool is_valid = ComputeTriangleBoundingBox( v0x, v0y, v0z, v0w, v1x, v1y, v1z, v1w, v2x, v2y, v2z, v2w, image_width, image_height, &left, &right, &bottom, &top); // Ignore triangles that do not overlap with any screen pixels. if (!is_valid) continue; ComputeUnnormalizedMatrixInverse( ToFixedPoint(v0x), ToFixedPoint(v1x), ToFixedPoint(v2x), ToFixedPoint(v0y), ToFixedPoint(v1y), ToFixedPoint(v2y), ToFixedPoint(v0w), ToFixedPoint(v1w), ToFixedPoint(v2w), face_culling_mode, unnormalized_matrix_inverse); // Iterate over each pixel in the bounding box. 
for (int iy = bottom; iy < top; ++iy) { for (int ix = left; ix < right; ++ix) { const float px = ((ix + 0.5f) / half_image_width) - 1.0f; const float py = ((iy + 0.5f) / half_image_height) - 1.0f; ComputeEdgeFunctions(px, py, unnormalized_matrix_inverse, b_over_w); if (!PixelIsInsideTriangle(b_over_w)) { continue; } const float one_over_w = b_over_w[0] + b_over_w[1] + b_over_w[2]; const float b0 = b_over_w[0] / one_over_w; const float b1 = b_over_w[1] / one_over_w; const float b2 = b_over_w[2] / one_over_w; // Since we computed an unnormalized w above, we need to recompute // a properly scaled clip-space w value and then divide clip-space z // by that. const float clip_z = b0 * v0z + b1 * v1z + b2 * v2z; const float clip_w = b0 * v0w + b1 * v1w + b2 * v2w; const float z = clip_z / clip_w; // Skip the pixel if it is beyond the near or far clipping plane. if (z < -1.0f || z > 1.0f) continue; // Insert into appropriate depth layer with insertion sort. float z_next = z; int32 id_next = triangle_id; float b0_next = b0; float b1_next = b1; float b2_next = b2; const int pixel_idx0 = iy * image_width + ix; for (int layer = 0; layer < num_layers; ++layer) { const int pixel_idx = pixel_idx0 + image_height * image_width * layer; if (z_next < z_buffer[pixel_idx]) { std::swap(z_next, z_buffer[pixel_idx]); std::swap(id_next, triangle_ids[pixel_idx]); if (barycentric_coordinates != nullptr) { std::swap(b0_next, barycentric_coordinates[3 * pixel_idx + 0]); std::swap(b1_next, barycentric_coordinates[3 * pixel_idx + 1]); std::swap(b2_next, barycentric_coordinates[3 * pixel_idx + 2]); } } // Exit the loop early if the clear depth (z == 1) is reached. if (z_next == 1) break; } } } } }
/* Copyright 2020 The TensorFlow Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "py/tensorflow_graphics/rendering/kernels/rasterize_triangles_impl.h" #include <algorithm> #include <cmath> namespace { using fixed_t = int64; // Converts to fixed point with 16 fractional bits and 48 integer bits. // TODO(fcole): fixed-point depth may be too shallow. // The algorithm requires multiplying two of the xyzw clip-space coordinates // together, summing, and then multiplying by an NDC pixel coordinate (three // total multiplies). After three multiplications, the fractional part will be // 48 bits, leaving 16 bits for the integer part. The NDC pixel coordinates // are in (-1,1) so they need only 1 integer bit, so as long as the values of // the inverse matrix are < 2^15, the fixed-point math should not overflow. This // seems a bit dicey but so far all the tests I've tried pass. constexpr int kFractionalBits = 16; constexpr fixed_t ShiftPointLeft(fixed_t x) { return x << kFractionalBits; } constexpr fixed_t ToFixedPoint(float f) { return static_cast<fixed_t>(f * ShiftPointLeft(1)); } // Takes the minimum of a and b, rounds down, and converts to an integer in // the range [low, high]. inline int ClampedIntegerMin(float a, float b, int low, int high) { const float value = std::floor(std::min(a, b)); return static_cast<int>( std::clamp(value, static_cast<float>(low), static_cast<float>(high))); } // Takes the maximum of a and b, rounds up, and converts to an integer in the // range [low, high]. inline int ClampedIntegerMax(float a, float b, int low, int high) { const float value = std::ceil(std::max(a, b)); return static_cast<int>( std::clamp(value, static_cast<float>(low), static_cast<float>(high))); } // Return true if the near plane is between the eye and the clip-space point // with the provided z and w. inline bool IsClipPointVisible(float z, float w) { return w > 0 && z >= -w; } // Computes the screen-space bounding box of the given clip-space triangle and // stores it into [left, right, bottom, top], where left and bottom limits are // inclusive while right and top are not. // Returns true if the bounding box includes any screen pixels. bool ComputeTriangleBoundingBox(float v0x, float v0y, float v0z, float v0w, float v1x, float v1y, float v1z, float v1w, float v2x, float v2y, float v2z, float v2w, int image_width, int image_height, int* left, int* right, int* bottom, int* top) { // If the triangle is entirely visible, project the vertices to pixel // coordinates and find the triangle bounding box enlarged to the nearest // integer and clamped to the image boundaries. If the triangle is not // entirely visible, intersect the edges that cross the near plane with the // near plane and use those to compute screen bounds instead. 
*left = image_width; *right = 0; *bottom = image_height; *top = 0; auto add_point = [&](float x, float y, float w) { const float px = 0.5f * (x / w + 1) * image_width; const float py = 0.5f * (y / w + 1) * image_height; *left = ClampedIntegerMin(*left, px, 0, image_width); *right = ClampedIntegerMax(*right, px, 0, image_width); *bottom = ClampedIntegerMin(*bottom, py, 0, image_height); *top = ClampedIntegerMax(*top, py, 0, image_height); }; auto add_near_point = [&](float x0, float y0, float z0, float w0, float x1, float y1, float z1, float w1) { const float denom = z0 - z1 + w0 - w1; if (denom != 0) { // Interpolate to near plane, where z/w == -1. const float t = (z0 + w0) / denom; const float x = x0 + t * (x1 - x0); const float y = y0 + t * (y1 - y0); const float w = w0 + t * (w1 - w0); add_point(x, y, w); } }; const bool visible_v0 = IsClipPointVisible(v0z, v0w); const bool visible_v1 = IsClipPointVisible(v1z, v1w); const bool visible_v2 = IsClipPointVisible(v2z, v2w); if (visible_v0) { add_point(v0x, v0y, v0w); if (!visible_v1) add_near_point(v0x, v0y, v0z, v0w, v1x, v1y, v1z, v1w); if (!visible_v2) add_near_point(v0x, v0y, v0z, v0w, v2x, v2y, v2z, v2w); } if (visible_v1) { add_point(v1x, v1y, v1w); if (!visible_v2) add_near_point(v1x, v1y, v1z, v1w, v2x, v2y, v2z, v2w); if (!visible_v0) add_near_point(v1x, v1y, v1z, v1w, v0x, v0y, v0z, v0w); } if (visible_v2) { add_point(v2x, v2y, v2w); if (!visible_v0) add_near_point(v2x, v2y, v2z, v2w, v0x, v0y, v0z, v0w); if (!visible_v1) add_near_point(v2x, v2y, v2z, v2w, v1x, v1y, v1z, v1w); } const bool is_valid = (*right > *left) && (*top > *bottom); return is_valid; } // Computes a 3x3 matrix inverse without dividing by the determinant. // Instead, makes an unnormalized matrix inverse with the correct sign // by flipping the sign of the matrix if the determinant is negative. // By leaving out determinant division, the rows of M^-1 only depend on two out // of three of the columns of M; i.e., the first row of M^-1 only depends on the // second and third columns of M, the second only depends on the first and // third, etc. This means we can compute edge functions for two neighboring // triangles independently and produce exactly the same numerical result up to // the sign. // See http://mathworld.wolfram.com/MatrixInverse.html // Culling is accomplished by inspecting the sign of the determinant as in: // "Incremental and Hierarchical Hilbert Order Edge Equation Polygon // Rasterization," McCool, et al., 2001 void ComputeUnnormalizedMatrixInverse( const fixed_t a11, const fixed_t a12, const fixed_t a13, const fixed_t a21, const fixed_t a22, const fixed_t a23, const fixed_t a31, const fixed_t a32, const fixed_t a33, const FaceCullingMode culling_mode, fixed_t m_inv[9]) { m_inv[0] = a22 * a33 - a32 * a23; m_inv[1] = a13 * a32 - a33 * a12; m_inv[2] = a12 * a23 - a22 * a13; m_inv[3] = a23 * a31 - a33 * a21; m_inv[4] = a11 * a33 - a31 * a13; m_inv[5] = a13 * a21 - a23 * a11; m_inv[6] = a21 * a32 - a31 * a22; m_inv[7] = a12 * a31 - a32 * a11; m_inv[8] = a11 * a22 - a21 * a12; // If the culling mode is kBack, leave the sign of the matrix unchanged. // Transfer the sign of the determinant if mode is kNone. If mode is kFront, // just invert the matrix. if (culling_mode == FaceCullingMode::kNone || culling_mode == FaceCullingMode::kFront) { // The first column of the unnormalized M^-1 contains intermediate values // for det(M). 
const float det = a11 * m_inv[0] + a12 * m_inv[3] + a13 * m_inv[6]; const float multiplier = (culling_mode == FaceCullingMode::kNone) ? std::copysign(1.0, det) : -1.0; for (int i = 0; i < 9; ++i) { m_inv[i] *= multiplier; } } } // Computes the edge functions from M^-1 as described by Olano and Greer, // "Triangle Scan Conversion using 2D Homogeneous Coordinates." // // This function combines equations (3) and (4). It first computes // [a b c] = u_i * M^-1, where u_0 = [1 0 0], u_1 = [0 1 0], etc., // then computes edge_i = aX + bY + c void ComputeEdgeFunctions(const float px, const float py, const fixed_t m_inv[9], fixed_t values[3]) { const fixed_t px_i = ToFixedPoint(px); const fixed_t py_i = ToFixedPoint(py); for (int i = 0; i < 3; ++i) { const fixed_t a = m_inv[3 * i + 0]; const fixed_t b = m_inv[3 * i + 1]; const fixed_t c = m_inv[3 * i + 2]; // Before summing, shift the point of c to align with the products of // multiplication. values[i] = a * px_i + b * py_i + ShiftPointLeft(c); } } // Determines whether the point p lies inside a triangle. Counts pixels exactly // on an edge as inside the triangle, as long as the triangle is not degenerate. // Degenerate (zero-area) triangles always fail the inside test. bool PixelIsInsideTriangle(const fixed_t edge_values[3]) { // Check that the edge values are all non-negative and that at least one is // positive (triangle is non-degenerate). return (edge_values[0] >= 0 && edge_values[1] >= 0 && edge_values[2] >= 0) && (edge_values[0] > 0 || edge_values[1] > 0 || edge_values[2] > 0); } } // namespace void RasterizeTrianglesImpl(const float* vertices, const int32* triangles, int32 triangle_count, int32 image_width, int32 image_height, int32 num_layers, FaceCullingMode face_culling_mode, int32* triangle_ids, float* z_buffer, float* barycentric_coordinates) { const float half_image_width = 0.5f * image_width; const float half_image_height = 0.5f * image_height; fixed_t unnormalized_matrix_inverse[9]; fixed_t b_over_w[3]; int left, right, bottom, top; for (int32 triangle_id = 0; triangle_id < triangle_count; ++triangle_id) { const int32 v0_x_id = 4 * triangles[3 * triangle_id]; const int32 v1_x_id = 4 * triangles[3 * triangle_id + 1]; const int32 v2_x_id = 4 * triangles[3 * triangle_id + 2]; const float v0x = vertices[v0_x_id]; const float v0y = vertices[v0_x_id + 1]; const float v0z = vertices[v0_x_id + 2]; const float v0w = vertices[v0_x_id + 3]; const float v1x = vertices[v1_x_id]; const float v1y = vertices[v1_x_id + 1]; const float v1z = vertices[v1_x_id + 2]; const float v1w = vertices[v1_x_id + 3]; const float v2x = vertices[v2_x_id]; const float v2y = vertices[v2_x_id + 1]; const float v2z = vertices[v2_x_id + 2]; const float v2w = vertices[v2_x_id + 3]; const bool is_valid = ComputeTriangleBoundingBox( v0x, v0y, v0z, v0w, v1x, v1y, v1z, v1w, v2x, v2y, v2z, v2w, image_width, image_height, &left, &right, &bottom, &top); // Ignore triangles that do not overlap with any screen pixels. if (!is_valid) continue; ComputeUnnormalizedMatrixInverse( ToFixedPoint(v0x), ToFixedPoint(v1x), ToFixedPoint(v2x), ToFixedPoint(v0y), ToFixedPoint(v1y), ToFixedPoint(v2y), ToFixedPoint(v0w), ToFixedPoint(v1w), ToFixedPoint(v2w), face_culling_mode, unnormalized_matrix_inverse); // Iterate over each pixel in the bounding box. 
for (int iy = bottom; iy < top; ++iy) { for (int ix = left; ix < right; ++ix) { const float px = ((ix + 0.5f) / half_image_width) - 1.0f; const float py = ((iy + 0.5f) / half_image_height) - 1.0f; ComputeEdgeFunctions(px, py, unnormalized_matrix_inverse, b_over_w); if (!PixelIsInsideTriangle(b_over_w)) { continue; } const float one_over_w = b_over_w[0] + b_over_w[1] + b_over_w[2]; const float b0 = b_over_w[0] / one_over_w; const float b1 = b_over_w[1] / one_over_w; const float b2 = b_over_w[2] / one_over_w; // Since we computed an unnormalized w above, we need to recompute // a properly scaled clip-space w value and then divide clip-space z // by that. const float clip_z = b0 * v0z + b1 * v1z + b2 * v2z; const float clip_w = b0 * v0w + b1 * v1w + b2 * v2w; const float z = clip_z / clip_w; // Skip the pixel if it is beyond the near or far clipping plane. if (z < -1.0f || z > 1.0f) continue; // Insert into appropriate depth layer with insertion sort. float z_next = z; int32 id_next = triangle_id; float b0_next = b0; float b1_next = b1; float b2_next = b2; const int pixel_idx0 = iy * image_width + ix; for (int layer = 0; layer < num_layers; ++layer) { const int pixel_idx = pixel_idx0 + image_height * image_width * layer; if (z_next < z_buffer[pixel_idx]) { std::swap(z_next, z_buffer[pixel_idx]); std::swap(id_next, triangle_ids[pixel_idx]); if (barycentric_coordinates != nullptr) { std::swap(b0_next, barycentric_coordinates[3 * pixel_idx + 0]); std::swap(b1_next, barycentric_coordinates[3 * pixel_idx + 1]); std::swap(b2_next, barycentric_coordinates[3 * pixel_idx + 2]); } } // Exit the loop early if the clear depth (z == 1) is reached. if (z_next == 1) break; } } } } }
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/geometry/convolution/tests/graph_convolution_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for graph convolution ops.""" import itertools from absl.testing import parameterized import numpy as np import tensorflow as tf import tensorflow_graphics.geometry.convolution.graph_convolution as gc from tensorflow_graphics.util import test_case def _dense_to_sparse(data): """Convert a numpy array to a tf.SparseTensor.""" indices = np.where(data) return tf.SparseTensor( np.stack(indices, axis=-1), data[indices], dense_shape=data.shape) def _dummy_data(batch_size, num_vertices, num_channels): """Create inputs for feature_steered_convolution.""" if batch_size > 0: data = np.zeros( shape=(batch_size, num_vertices, num_channels), dtype=np.float32) neighbors = _dense_to_sparse( np.tile(np.eye(num_vertices, dtype=np.float32), (batch_size, 1, 1))) else: data = np.zeros(shape=(num_vertices, num_channels), dtype=np.float32) neighbors = _dense_to_sparse(np.eye(num_vertices, dtype=np.float32)) return data, neighbors def _dummy_variables(in_channels, out_channels, num_weight_matrices): """Create variable substitutes for feature_steered_convolution.""" var_u = tf.zeros(shape=(in_channels, num_weight_matrices)) var_v = tf.zeros(shape=(in_channels, num_weight_matrices)) var_c = tf.zeros(shape=(num_weight_matrices)) var_w = tf.zeros(shape=(num_weight_matrices, in_channels, out_channels)) var_b = tf.zeros(shape=(out_channels)) return var_u, var_v, var_c, var_w, var_b def _random_data(batch_size, num_vertices, num_channels, padding, only_self_edges, data_type=np.float32, neighbors_type=np.float32, sizes_type=np.int32): """Create random inputs for feature_steered_convolution.""" def _random_data_2d(padding): size = num_vertices if not padding else np.random.randint( low=1, high=num_vertices + 1) data = np.random.uniform(size=(size, num_channels)).astype(data_type) if only_self_edges: neighbors = np.eye(size, dtype=neighbors_type) else: random = np.random.uniform(size=(size, size)).astype(neighbors_type) neighbors = np.maximum( np.where(random > 0.75, np.ones_like(random), np.zeros_like(random)), np.eye(size, dtype=neighbors_type)) neighbors = neighbors / np.sum(neighbors, axis=1, keepdims=True) if padding: data = np.pad(data, ((0, num_vertices - size), (0, 0)), "constant") neighbors = np.pad(neighbors, ((0, num_vertices - size), (0, num_vertices - size)), "constant") return data, neighbors, size else: return data, neighbors if batch_size > 0: list_2d = [_random_data_2d(padding=padding) for _ in range(batch_size)] data = np.stack([i[0] for i in list_2d], 0).astype(data_type) neighbors = np.stack([i[1] for i in list_2d], 0).astype(neighbors_type) if padding: sizes = np.stack([i[2] for i in list_2d], 0).astype(sizes_type) return data, _dense_to_sparse(neighbors), sizes else: return data, _dense_to_sparse(neighbors) else: if padding: raise ValueError("Padding only allowed with batched data.") data, neighbors = _random_data_2d(padding=False) return data.astype(data_type), _dense_to_sparse( neighbors.astype(neighbors_type)) def 
_random_variables(in_channels, out_channels, num_weight_matrices, dtype=np.float32): """Create random variables for feature_steered_convolution.""" def _random_constant(shape, dtype): return tf.constant(np.random.uniform(size=shape).astype(dtype)) var_u = _random_constant([in_channels, num_weight_matrices], dtype) var_v = _random_constant([in_channels, num_weight_matrices], dtype) var_c = _random_constant([num_weight_matrices], dtype) var_w = _random_constant([num_weight_matrices, in_channels, out_channels], dtype) var_b = _random_constant([out_channels], dtype) return var_u, var_v, var_c, var_w, var_b class GraphConvolutionTestFeatureSteeredConvolutionTests(test_case.TestCase): @parameterized.parameters( ("'sizes' must have an integer type.", np.float32, np.float32, np.float32, np.float32), ("'data' must have a float type.", np.int32, np.float32, np.int32, np.float32), ("'neighbors' and 'data' must have the same type.", np.float32, np.float64, np.int32, np.float32), ) def test_feature_steered_convolution_exception_raised_types( self, err_msg, data_type, neighbors_type, sizes_type, var_type): """Check the type errors for invalid input types.""" data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type, neighbors_type, sizes_type) u, v, c, w, b = _random_variables(3, 3, 1, var_type) with self.assertRaisesRegexp(TypeError, err_msg): _ = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=sizes, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) @parameterized.parameters( (np.float32, np.float32, np.int32, np.float32), (np.float64, np.float64, np.int32, np.float64), (np.float32, np.float32, np.int64, np.float32), (np.float64, np.float64, np.int64, np.float64), ) def test_feature_steered_convolution_exception_not_raised_types( self, data_type, neighbors_type, sizes_type, var_type): """Check there are no exceptions for valid input types.""" data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type, neighbors_type, sizes_type) u, v, c, w, b = _random_variables(3, 3, 1, var_type) try: gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=sizes, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) except Exception as e: # pylint: disable=broad-except self.fail("Exception raised: %s" % str(e)) def test_feature_steered_convolution_exception_raised_shapes(self): """Check that invalid input shapes trigger the right exceptions.""" with self.assertRaisesRegexp(ValueError, "must have a rank of 2"): data, neighbors = _dummy_data(1, 5, 2) u, v, c, w, b = _dummy_variables(2, 2, 1) data = data[0, :] _ = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=None, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) with self.assertRaisesRegexp(ValueError, "must have a rank greater than 1"): u, v, c, w, b = _dummy_variables(2, 2, 1) data = np.ones(shape=(5), dtype=np.float32) neighbors = _dense_to_sparse(np.ones(shape=(5), dtype=np.float32)) _ = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=None, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) with self.assertRaisesRegexp(ValueError, "Not all batch dimensions are identical."): data, neighbors = _dummy_data(1, 5, 2) u, v, c, w, b = _dummy_variables(2, 2, 1) _ = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=(1, 1), var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) @parameterized.parameters( (1, 1, 1, 1, 1), (4, 2, 3, 6, 5), (0, 1, 1, 1, 1), (0, 2, 3, 6, 5), ) def test_feature_steered_convolution_output_shape(self, batch_size, num_vertices, in_channels, out_channels, 
num_weight_matrices): """Check that the output of convolution has the correct shape.""" data, neighbors = _dummy_data(batch_size, num_vertices, in_channels) u, v, c, w, b = _dummy_variables(in_channels, out_channels, num_weight_matrices) y = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=None, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) y_shape = y.shape.as_list() self.assertEqual(y_shape[-1], out_channels) self.assertAllEqual(y_shape[:-1], data.shape[:-1]) @parameterized.parameters( (1, 1, 1, 1, 1), (4, 2, 3, 6, 5), (0, 1, 1, 1, 1), (0, 2, 3, 6, 5), ) def test_feature_steered_convolution_only_self_edges(self, batch_size, num_vertices, in_channels, out_channels, num_weight_matrices): """Test convolution when the graph only has self edges.""" data, neighbors = _random_data( batch_size, num_vertices, in_channels, padding=False, only_self_edges=True) u, v, c, w, b = _random_variables(in_channels, out_channels, num_weight_matrices) with self.subTest(name="w=0_expect_output=b"): y = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=None, var_u=u, var_v=v, var_c=c, var_w=tf.zeros_like(w), var_b=b) y_expected = tf.broadcast_to(b, y.shape) self.assertAllEqual(y, y_expected) with self.subTest(name="translation_invariant_self_edges"): y = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=None, var_u=u, var_v=-u, var_c=c, var_w=w, var_b=b) q = tf.reshape( tf.exp(c) / tf.reduce_sum(input_tensor=tf.exp(c)), (num_weight_matrices, 1, 1)) if batch_size > 0: q_times_w = tf.reduce_sum(input_tensor=q * w, axis=0, keepdims=True) q_times_w = tf.tile(q_times_w, (batch_size, 1, 1)) else: q_times_w = tf.reduce_sum(input_tensor=q * w, axis=0) y_expected = tf.matmul(data, q_times_w) + tf.broadcast_to(b, y.shape) self.assertAllClose(y, y_expected) with self.subTest(name="constant_signal"): if batch_size > 0: constant_data = np.tile( np.random.uniform(size=(batch_size, 1, in_channels)).astype(np.float32), (1, num_vertices, 1)) else: constant_data = np.tile( np.random.uniform(size=(1, in_channels)).astype(np.float32), (num_vertices, 1)) y = gc.feature_steered_convolution( data=constant_data, neighbors=neighbors, sizes=None, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) if batch_size > 0: y_expected = tf.tile(y[:, :1, :], (1, num_vertices, 1)) else: y_expected = tf.tile(y[:1, :], (num_vertices, 1)) self.assertAllClose(y, y_expected) @parameterized.parameters( (((1.0,), (2.0,), (3.0,)), np.ones(shape=(3, 3)) / 3.0, ((0.5,),), ((1.3,),), (-0.7,), (((0.8,),),), (3.0,), ((4.6,), (4.6,), (4.6,))), (((1.0,), (2.0,), (3.0,)), np.ones(shape=(3, 3)) / 3.0, ((0.5, 0.2),), ((0.3, 0.4),), (-0.7, 0.15), (((0.8,),), ((1.1,),)), (3.0,), ((5.011706928844621,), (4.971030281984818,), (4.927388658982911,))), ) def test_feature_steered_convolution_padding_preset(self, data, neighbors, u, v, c, w, b, expected): """Test expected result for preset data and filter values.""" array = (np.array(i) for i in (data, neighbors, expected)) data, neighbors, expected = array tensors = (tf.convert_to_tensor(value=np.array(i).astype(data.dtype)) \ for i in (u, v, c, w, b)) u, v, c, w, b = tensors y = gc.feature_steered_convolution( data=data, neighbors=_dense_to_sparse(neighbors), sizes=None, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) self.assertAllClose(y, expected) @parameterized.parameters( (1, 5, 1, 1, 1), (2, 6, 3, 6, 5), (5, 15, 6, 12, 8), ) def test_feature_steered_convolution_padding_random(self, batch_size, num_vertices, in_channels, out_channels, num_weight_matrices): """Test 
mixed topology batches (random vertices and neighbors).""" data, neighbors, sizes = _random_data( batch_size, num_vertices, in_channels, padding=True, only_self_edges=False) u, v, c, w, b = _random_variables(in_channels, out_channels, num_weight_matrices) with self.subTest(name="if_w_is_0_then_y_is_b"): y = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=sizes, var_u=u, var_v=v, var_c=c, var_w=tf.zeros_like(w), var_b=b) for k in range(batch_size): y_crop = y[k, :sizes[k], :] y_expected = tf.broadcast_to(b, y_crop.shape) self.assertAllEqual(y_crop, y_expected) # Check for zeros in the padded region. self.assertAllEqual(y[k, sizes[k]:, :], tf.zeros((num_vertices - sizes[k], out_channels))) with self.subTest(name="convolve_with_constant"): constant_data = data for k in range(batch_size): constant_data[k, :sizes[k], :] = np.tile(data[k, 0, :], (sizes[k], 1)) y = gc.feature_steered_convolution( data=constant_data, neighbors=neighbors, sizes=sizes, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) for k in range(batch_size): y_crop = y[k, :sizes[k], :] y_const = tf.broadcast_to(y_crop[0, :], y_crop.shape) self.assertAllClose(y_crop, y_const) # Check for zeros in the padded region. self.assertAllEqual(y[k, sizes[k]:, :], tf.zeros([num_vertices - sizes[k], out_channels])) @parameterized.parameters( (1, 10, 3, 1, True), (3, 6, 1, 4, True), (0, 10, 5, 2, False), (1, 10, 3, 1, False), (3, 6, 1, 4, False), (0, 10, 5, 2, False), ) def test_feature_steered_convolution_jacobian_random(self, batch_size, num_vertices, in_channels, num_weight_matrices, padding): """Test the jacobian for random input data.""" random_data = _random_data( batch_size, num_vertices, in_channels, padding, only_self_edges=False, data_type=np.float64, neighbors_type=np.float64) data_init = random_data[0] neighbors = random_data[1] sizes = None if not padding else random_data[2] u, v, c, w, b = _random_variables( in_channels, in_channels, num_weight_matrices, dtype=np.float64) def feature_steered_convolution(data): return gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=sizes, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) self.assert_jacobian_is_correct_fn(feature_steered_convolution, [data_init]) @parameterized.parameters( (1, 1, 0.0), (5, 1, 0.0), (1, 3, 0.0), (5, 3, 0.0), (1, 1, 1.0), (5, 1, 1.0), (1, 3, 1.0), (5, 3, 1.0), ) def test_feature_steered_convolution_jacobian_preset(self, num_vertices, num_channels, data_multiplier): """Test the jacobian is correct for preset inputs.""" # Corner cases include one vertex, one channel, and all-zero features. 
data_init = data_multiplier * np.random.uniform( size=(num_vertices, num_channels)).astype(np.float64) neighbors = tf.sparse.eye(num_vertices, dtype=tf.float64) u, v, c, w, b = _random_variables( num_channels, num_channels, 1, dtype=np.float64) def feature_steered_convolution(data): return gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=None, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) self.assert_jacobian_is_correct_fn(feature_steered_convolution, [data_init]) class EdgeConvolutionTemplateTests(test_case.TestCase): def _zeros(self, vertex_features, _, out_dimensions=None): """A callable for `edge_convolution_template`.""" if out_dimensions is None: return tf.zeros_like(vertex_features) else: return tf.zeros( shape=(vertex_features.shape.as_list()[0], out_dimensions), dtype=vertex_features.dtype) def _pass_through(self, _, neighbor_features): """A callable for `edge_convolution_template`.""" return neighbor_features def _circular_2d_data(self, num_vertices, include_normals=False): """Create data for a circle graph.""" # Vertices are points distributed uniformly on a circle, with each point # connected to its closest neighbor on either side. theta = np.linspace(0.0, np.pi * 2.0, num=num_vertices, endpoint=False) data = np.stack((np.cos(theta), np.sin(theta)), axis=-1) if include_normals: data = np.concatenate((data, data), axis=-1) eye = np.eye(num_vertices) neighbors = np.maximum(np.roll(eye, 1, axis=1), np.roll(eye, -1, axis=1)) * 0.5 return data, _dense_to_sparse(neighbors) def _edge_curvature_2d(self, vertex_features, neighbor_features): """A callable for `edge_convolution_template` that computes curvature.""" x_position, x_normal = tf.split( value=vertex_features, num_or_size_splits=2, axis=-1) y_position, y_normal = tf.split( value=neighbor_features, num_or_size_splits=2, axis=-1) yx_diff = x_position - y_position curvature_unscaled = tf.abs( tf.reduce_sum( input_tensor=(y_normal - x_normal) * yx_diff, axis=-1, keepdims=True)) edge_length_squared = tf.reduce_sum( input_tensor=yx_diff * yx_diff, axis=-1, keepdims=True) return tf.where( tf.less(edge_length_squared, 1e-7), tf.zeros_like(edge_length_squared), curvature_unscaled / edge_length_squared) @parameterized.parameters( ("'sizes' must have an integer type.", np.float32, np.float32, np.float32), ("'data' must have a float type.", np.int32, np.float32, np.int32), ("'neighbors' and 'data' must have the same type.", np.float32, np.float64, np.int32), ) def test_edge_convolution_template_exception_raised_types( self, err_msg, data_type, neighbors_type, sizes_type): """Check the type errors for invalid input types.""" data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type, neighbors_type, sizes_type) with self.assertRaisesRegexp(TypeError, err_msg): gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=sizes, edge_function=self._zeros, reduction="weighted", edge_function_kwargs=dict()) @parameterized.parameters( (np.float32, np.float32, np.int32), (np.float64, np.float64, np.int32), (np.float32, np.float32, np.int64), (np.float64, np.float64, np.int64), (np.float64, np.float64, np.int8), (np.float64, np.float64, np.uint8), (np.float64, np.float64, np.int16), (np.float64, np.float64, np.uint16), ) def test_edge_convolution_template_exception_not_raised_types( self, data_type, neighbors_type, sizes_type): """Check there are no exceptions for valid input types.""" data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type, neighbors_type, sizes_type) try: 
gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=sizes, edge_function=self._zeros, reduction="weighted", edge_function_kwargs=dict()) except Exception as e: # pylint: disable=broad-except self.fail("Exception raised: %s" % str(e)) def test_edge_convolution_template_exception_raised_shapes(self): """Check that invalid input shapes trigger the right exceptions.""" with self.assertRaisesRegexp(ValueError, "must have a rank of 2"): data, neighbors = _dummy_data(1, 5, 2) data = data[0, :] _ = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._zeros, reduction="weighted", edge_function_kwargs=dict()) with self.assertRaisesRegexp(ValueError, "must have a rank greater than 1"): data = np.ones(shape=(5), dtype=np.float32) neighbors = _dense_to_sparse(np.ones(shape=(5), dtype=np.float32)) _ = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._zeros, reduction="weighted", edge_function_kwargs=dict()) with self.assertRaisesRegexp(ValueError, "must have a rank of 1"): data, neighbors = _dummy_data(1, 5, 2) _ = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=((1, 1), (1, 1)), edge_function=self._zeros, reduction="weighted", edge_function_kwargs=dict()) @parameterized.parameters("", "invalid") def test_edge_convolution_template_exception_raised_reduction( self, reduction): """Check that an invalid reduction method triggers the exception.""" with self.assertRaisesRegexp(ValueError, "reduction method"): data, neighbors = _dummy_data(1, 5, 2) gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._zeros, reduction=reduction, edge_function_kwargs=dict()) @parameterized.parameters( (1, 1, 1, 1, "weighted"), (4, 2, 3, 6, "weighted"), (0, 1, 1, 1, "max"), (0, 2, 3, 6, "max"), ) def test_edge_convolution_template_output_shape(self, batch_size, num_vertices, in_channels, out_channels, reduction): """Check that the output of convolution has the correct shape.""" data, neighbors = _dummy_data(batch_size, num_vertices, in_channels) y = gc.edge_convolution_template( data, neighbors, None, self._zeros, reduction=reduction, edge_function_kwargs={"out_dimensions": out_channels}) y_shape = y.shape.as_list() with self.subTest(name="out_channels"): self.assertEqual(y_shape[-1], out_channels) with self.subTest(name="shape"): self.assertAllEqual(y_shape[:-1], data.shape[:-1]) def test_edge_convolution_template_zero_neighbors(self): """Check that vertices with no neighbors map to zeros in the output.""" # We can reuse `self._edge_curvature_2d` as the curvature functional. num_vertices = 500 data, neighbors = self._circular_2d_data(num_vertices, include_normals=True) # Interleave the data with rows filled with random data, these rows will # have no neighbors in the adjacency matrix so should map to all zeros in # the output. 
rows_odd = tf.expand_dims( tf.range(start=1, limit=(2 * num_vertices), delta=2), -1) rows_even = tf.expand_dims( tf.range(start=0, limit=(2 * num_vertices + 1), delta=2), -1) data_interleaved = tf.scatter_nd( indices=rows_odd, updates=data, shape=(2 * num_vertices + 1, tf.shape(input=data)[-1])) random_data = tf.random.uniform( shape=(data.shape[0] + 1, data.shape[-1]), dtype=data.dtype) random_interleaved = tf.scatter_nd( indices=rows_even, updates=random_data, shape=(2 * num_vertices + 1, tf.shape(input=data)[-1])) data_interleaved = data_interleaved + random_interleaved neighbors_interleaved_indices = neighbors.indices * 2 + 1 neighbors_interleaved = tf.SparseTensor( indices=neighbors_interleaved_indices, values=neighbors.values, dense_shape=(2 * num_vertices + 1, 2 * num_vertices + 1)) # Convolve the interleaved data. data_curvature = gc.edge_convolution_template( data=data_interleaved, neighbors=neighbors_interleaved, sizes=None, edge_function=self._edge_curvature_2d, reduction="weighted", edge_function_kwargs=dict()) self.assertEqual(data_curvature.shape, (2 * num_vertices + 1, 1)) # The rows corresponding to the original input data measure the curvature. # The curvature at any point on a circle of radius 1 should be 1. # The interleaved rows of random data should map to zeros in the output. self.assertAllClose(data_curvature[1::2, :], np.ones(shape=(num_vertices, 1))) self.assertAllClose(data_curvature[::2, :], np.zeros(shape=(num_vertices + 1, 1))) @parameterized.parameters( (1, 10, 3, True, "weighted"), (3, 6, 1, True, "weighted"), (0, 10, 5, False, "weighted"), (1, 10, 3, False, "max"), (3, 6, 1, False, "max"), (0, 10, 5, False, "max"), ) def test_edge_convolution_template_jacobian_random(self, batch_size, num_vertices, in_channels, padding, reduction): """Test the jacobian for random input data.""" random_data = _random_data( batch_size, num_vertices, in_channels, padding, only_self_edges=False, data_type=np.float64, neighbors_type=np.float64) data_init = random_data[0] neighbors = random_data[1] sizes = None if not padding else random_data[2] def edge_convolution_template(data): return gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=sizes, edge_function=self._pass_through, reduction=reduction, edge_function_kwargs=dict()) self.assert_jacobian_is_correct_fn(edge_convolution_template, [data_init]) def test_edge_convolution_template_preset_max(self): data = np.array(((1, 2), (3, 4), (5, 6), (7, 8)), np.float32) neighbors = np.array( ((0, 1, 0, 1), (0, 0, 1, 0), (1, 1, 1, 0), (0, 0, 1, 1)), np.float32) neighbors = _dense_to_sparse(neighbors) true = np.array(((8, 10), (8, 10), (10, 12), (14, 16)), np.float32) with self.subTest("max_sum"): max_sum = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=lambda x, y: x + y, reduction="max", edge_function_kwargs=dict()) self.assertAllEqual(max_sum, true) with self.subTest("max_sum_scaled"): # Max reduction ignores the weights, so scaling the neighbors weights # should not change the result. 
max_sum_scaled = gc.edge_convolution_template( data=data, neighbors=neighbors * 10.0, sizes=None, edge_function=lambda x, y: x + y, reduction="max", edge_function_kwargs=dict()) self.assertAllEqual(max_sum_scaled, true) @parameterized.parameters( itertools.product((1, 5), (1, 3), (0.0, 1.0), ("weighted", "max"))) def test_edge_convolution_template_jacobian_preset(self, num_vertices, num_channels, data_multiplier, reduction): """Test the jacobian is correct for preset inputs.""" # Corner cases include one vertex, one channel, and all-zero features. data_init = data_multiplier * np.random.uniform( size=(num_vertices, num_channels)).astype(np.float64) neighbors = tf.sparse.eye(num_vertices, dtype=tf.float64) def edge_convolution_template(data): return gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._pass_through, reduction=reduction, edge_function_kwargs=dict()) self.assert_jacobian_is_correct_fn(edge_convolution_template, [data_init]) def test_edge_convolution_template_laplacian_smoothing(self): r"""Test the expected result with laplacian smoothing. Laplacian smoothing for meshes is defined as $$y_i = \frac{1}{|\mathcal{N(i)}|} \sum_{j \in \mathcal{N(i)}} x_j$$ This can be computed using `edge_convolution_template` with `f(x, y)->y`. """ # We can reuse `self._pass_through(x, y)->y` as the smoothing functional. with self.subTest(name="only_self_edges_random"): num_vertices = 500 data = np.random.uniform(size=(num_vertices, 5)) neighbors = tf.sparse.eye(num_vertices, dtype=tf.as_dtype(data.dtype)) data_smoothed = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._pass_through, reduction="weighted", edge_function_kwargs=dict()) self.assertAllEqual(data, data_smoothed) with self.subTest(name="circular_2d"): num_vertices = 500 data, neighbors = self._circular_2d_data(num_vertices) data_smoothed = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._pass_through, reduction="weighted", edge_function_kwargs=dict()) # The smoothed points should have the same direction as the originals. data_smoothed_normalized = tf.nn.l2_normalize(data_smoothed, axis=-1) self.assertAllClose(data, data_smoothed_normalized) def test_edge_convolution_template_curvature(self): r"""Test the expected result with curvature. (Approximate) curvature for meshes is defined as $$\kappa_{v_i} = \frac{1}{|\mathcal{N}(v_i)|} \sum_{v_j \in \mathcal{N}(v_i)} \frac{(\vec{v_i} - \vec{v_j})^T (\vec{n_{v_i}} - \vec{n_{v_j}})} {\left|\vec{v_i}-\vec{v_j}\right|^2} $$ This can be computed using `edge_convolution_template` with $$f(x, y) = (n_x - n_y)^T (x - y) / ||x - y||^2.$$ where $$n_x$$ and $$n_y$$ are the normals at points $$x$$ and $$y$$ respectively. """ # We can reuse `self._edge_curvature_2d` as the curvature functional. num_vertices = 500 data, neighbors = self._circular_2d_data(num_vertices, include_normals=True) data_curvature = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._edge_curvature_2d, reduction="weighted", edge_function_kwargs=dict()) # The curvature at each point on a circle of radius 1 should be 1. self.assertAllClose(data_curvature, np.ones(shape=(num_vertices, 1))) if __name__ == "__main__": test_case.main()
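The curvature functional exercised by the tests above encodes the identity that the discrete curvature at every point of a unit circle is 1. A minimal standalone NumPy sketch of that identity, using only the formula from `_edge_curvature_2d` (names here are chosen for illustration, not taken from the library):

```python
import numpy as np

# Discrete curvature between a vertex (position p_x, normal n_x) and a
# neighbor (position p_y, normal n_y):
#   |(n_y - n_x) . (p_x - p_y)| / |p_x - p_y|^2
def edge_curvature(p_x, n_x, p_y, n_y):
    diff = p_x - p_y
    return abs(np.dot(n_y - n_x, diff)) / np.dot(diff, diff)

# On the unit circle the outward normal at a point equals the point itself.
theta = np.linspace(0.0, 2.0 * np.pi, num=100, endpoint=False)
points = np.stack((np.cos(theta), np.sin(theta)), axis=-1)

# Average the edge curvature over the two ring neighbors of vertex 0.
kappa = 0.5 * (edge_curvature(points[0], points[0], points[1], points[1]) +
               edge_curvature(points[0], points[0], points[-1], points[-1]))
print(np.isclose(kappa, 1.0))  # True: the curvature of a unit circle is 1.
```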
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for graph convolution ops.""" import itertools from absl.testing import parameterized import numpy as np import tensorflow as tf import tensorflow_graphics.geometry.convolution.graph_convolution as gc from tensorflow_graphics.util import test_case def _dense_to_sparse(data): """Convert a numpy array to a tf.SparseTensor.""" indices = np.where(data) return tf.SparseTensor( np.stack(indices, axis=-1), data[indices], dense_shape=data.shape) def _dummy_data(batch_size, num_vertices, num_channels): """Create inputs for feature_steered_convolution.""" if batch_size > 0: data = np.zeros( shape=(batch_size, num_vertices, num_channels), dtype=np.float32) neighbors = _dense_to_sparse( np.tile(np.eye(num_vertices, dtype=np.float32), (batch_size, 1, 1))) else: data = np.zeros(shape=(num_vertices, num_channels), dtype=np.float32) neighbors = _dense_to_sparse(np.eye(num_vertices, dtype=np.float32)) return data, neighbors def _dummy_variables(in_channels, out_channels, num_weight_matrices): """Create variable substitutes for feature_steered_convolution.""" var_u = tf.zeros(shape=(in_channels, num_weight_matrices)) var_v = tf.zeros(shape=(in_channels, num_weight_matrices)) var_c = tf.zeros(shape=(num_weight_matrices)) var_w = tf.zeros(shape=(num_weight_matrices, in_channels, out_channels)) var_b = tf.zeros(shape=(out_channels)) return var_u, var_v, var_c, var_w, var_b def _random_data(batch_size, num_vertices, num_channels, padding, only_self_edges, data_type=np.float32, neighbors_type=np.float32, sizes_type=np.int32): """Create random inputs for feature_steered_convolution.""" def _random_data_2d(padding): size = num_vertices if not padding else np.random.randint( low=1, high=num_vertices + 1) data = np.random.uniform(size=(size, num_channels)).astype(data_type) if only_self_edges: neighbors = np.eye(size, dtype=neighbors_type) else: random = np.random.uniform(size=(size, size)).astype(neighbors_type) neighbors = np.maximum( np.where(random > 0.75, np.ones_like(random), np.zeros_like(random)), np.eye(size, dtype=neighbors_type)) neighbors = neighbors / np.sum(neighbors, axis=1, keepdims=True) if padding: data = np.pad(data, ((0, num_vertices - size), (0, 0)), "constant") neighbors = np.pad(neighbors, ((0, num_vertices - size), (0, num_vertices - size)), "constant") return data, neighbors, size else: return data, neighbors if batch_size > 0: list_2d = [_random_data_2d(padding=padding) for _ in range(batch_size)] data = np.stack([i[0] for i in list_2d], 0).astype(data_type) neighbors = np.stack([i[1] for i in list_2d], 0).astype(neighbors_type) if padding: sizes = np.stack([i[2] for i in list_2d], 0).astype(sizes_type) return data, _dense_to_sparse(neighbors), sizes else: return data, _dense_to_sparse(neighbors) else: if padding: raise ValueError("Padding only allowed with batched data.") data, neighbors = _random_data_2d(padding=False) return data.astype(data_type), _dense_to_sparse( neighbors.astype(neighbors_type)) def 
_random_variables(in_channels, out_channels, num_weight_matrices, dtype=np.float32): """Create random variables for feature_steered_convolution.""" def _random_constant(shape, dtype): return tf.constant(np.random.uniform(size=shape).astype(dtype)) var_u = _random_constant([in_channels, num_weight_matrices], dtype) var_v = _random_constant([in_channels, num_weight_matrices], dtype) var_c = _random_constant([num_weight_matrices], dtype) var_w = _random_constant([num_weight_matrices, in_channels, out_channels], dtype) var_b = _random_constant([out_channels], dtype) return var_u, var_v, var_c, var_w, var_b class GraphConvolutionTestFeatureSteeredConvolutionTests(test_case.TestCase): @parameterized.parameters( ("'sizes' must have an integer type.", np.float32, np.float32, np.float32, np.float32), ("'data' must have a float type.", np.int32, np.float32, np.int32, np.float32), ("'neighbors' and 'data' must have the same type.", np.float32, np.float64, np.int32, np.float32), ) def test_feature_steered_convolution_exception_raised_types( self, err_msg, data_type, neighbors_type, sizes_type, var_type): """Check the type errors for invalid input types.""" data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type, neighbors_type, sizes_type) u, v, c, w, b = _random_variables(3, 3, 1, var_type) with self.assertRaisesRegexp(TypeError, err_msg): _ = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=sizes, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) @parameterized.parameters( (np.float32, np.float32, np.int32, np.float32), (np.float64, np.float64, np.int32, np.float64), (np.float32, np.float32, np.int64, np.float32), (np.float64, np.float64, np.int64, np.float64), ) def test_feature_steered_convolution_exception_not_raised_types( self, data_type, neighbors_type, sizes_type, var_type): """Check there are no exceptions for valid input types.""" data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type, neighbors_type, sizes_type) u, v, c, w, b = _random_variables(3, 3, 1, var_type) try: gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=sizes, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) except Exception as e: # pylint: disable=broad-except self.fail("Exception raised: %s" % str(e)) def test_feature_steered_convolution_exception_raised_shapes(self): """Check that invalid input shapes trigger the right exceptions.""" with self.assertRaisesRegexp(ValueError, "must have a rank of 2"): data, neighbors = _dummy_data(1, 5, 2) u, v, c, w, b = _dummy_variables(2, 2, 1) data = data[0, :] _ = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=None, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) with self.assertRaisesRegexp(ValueError, "must have a rank greater than 1"): u, v, c, w, b = _dummy_variables(2, 2, 1) data = np.ones(shape=(5), dtype=np.float32) neighbors = _dense_to_sparse(np.ones(shape=(5), dtype=np.float32)) _ = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=None, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) with self.assertRaisesRegexp(ValueError, "Not all batch dimensions are identical."): data, neighbors = _dummy_data(1, 5, 2) u, v, c, w, b = _dummy_variables(2, 2, 1) _ = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=(1, 1), var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) @parameterized.parameters( (1, 1, 1, 1, 1), (4, 2, 3, 6, 5), (0, 1, 1, 1, 1), (0, 2, 3, 6, 5), ) def test_feature_steered_convolution_output_shape(self, batch_size, num_vertices, in_channels, out_channels, 
num_weight_matrices): """Check that the output of convolution has the correct shape.""" data, neighbors = _dummy_data(batch_size, num_vertices, in_channels) u, v, c, w, b = _dummy_variables(in_channels, out_channels, num_weight_matrices) y = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=None, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) y_shape = y.shape.as_list() self.assertEqual(y_shape[-1], out_channels) self.assertAllEqual(y_shape[:-1], data.shape[:-1]) @parameterized.parameters( (1, 1, 1, 1, 1), (4, 2, 3, 6, 5), (0, 1, 1, 1, 1), (0, 2, 3, 6, 5), ) def test_feature_steered_convolution_only_self_edges(self, batch_size, num_vertices, in_channels, out_channels, num_weight_matrices): """Test convolution when the graph only has self edges.""" data, neighbors = _random_data( batch_size, num_vertices, in_channels, padding=False, only_self_edges=True) u, v, c, w, b = _random_variables(in_channels, out_channels, num_weight_matrices) with self.subTest(name="w=0_expect_output=b"): y = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=None, var_u=u, var_v=v, var_c=c, var_w=tf.zeros_like(w), var_b=b) y_expected = tf.broadcast_to(b, y.shape) self.assertAllEqual(y, y_expected) with self.subTest(name="translation_invariant_self_edges"): y = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=None, var_u=u, var_v=-u, var_c=c, var_w=w, var_b=b) q = tf.reshape( tf.exp(c) / tf.reduce_sum(input_tensor=tf.exp(c)), (num_weight_matrices, 1, 1)) if batch_size > 0: q_times_w = tf.reduce_sum(input_tensor=q * w, axis=0, keepdims=True) q_times_w = tf.tile(q_times_w, (batch_size, 1, 1)) else: q_times_w = tf.reduce_sum(input_tensor=q * w, axis=0) y_expected = tf.matmul(data, q_times_w) + tf.broadcast_to(b, y.shape) self.assertAllClose(y, y_expected) with self.subTest(name="constant_signal"): if batch_size > 0: constant_data = np.tile( np.random.uniform(size=(batch_size, 1, in_channels)).astype(np.float32), (1, num_vertices, 1)) else: constant_data = np.tile( np.random.uniform(size=(1, in_channels)).astype(np.float32), (num_vertices, 1)) y = gc.feature_steered_convolution( data=constant_data, neighbors=neighbors, sizes=None, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) if batch_size > 0: y_expected = tf.tile(y[:, :1, :], (1, num_vertices, 1)) else: y_expected = tf.tile(y[:1, :], (num_vertices, 1)) self.assertAllClose(y, y_expected) @parameterized.parameters( (((1.0,), (2.0,), (3.0,)), np.ones(shape=(3, 3)) / 3.0, ((0.5,),), ((1.3,),), (-0.7,), (((0.8,),),), (3.0,), ((4.6,), (4.6,), (4.6,))), (((1.0,), (2.0,), (3.0,)), np.ones(shape=(3, 3)) / 3.0, ((0.5, 0.2),), ((0.3, 0.4),), (-0.7, 0.15), (((0.8,),), ((1.1,),)), (3.0,), ((5.011706928844621,), (4.971030281984818,), (4.927388658982911,))), ) def test_feature_steered_convolution_padding_preset(self, data, neighbors, u, v, c, w, b, expected): """Test expected result for preset data and filter values.""" array = (np.array(i) for i in (data, neighbors, expected)) data, neighbors, expected = array tensors = (tf.convert_to_tensor(value=np.array(i).astype(data.dtype)) \ for i in (u, v, c, w, b)) u, v, c, w, b = tensors y = gc.feature_steered_convolution( data=data, neighbors=_dense_to_sparse(neighbors), sizes=None, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) self.assertAllClose(y, expected) @parameterized.parameters( (1, 5, 1, 1, 1), (2, 6, 3, 6, 5), (5, 15, 6, 12, 8), ) def test_feature_steered_convolution_padding_random(self, batch_size, num_vertices, in_channels, out_channels, num_weight_matrices): """Test 
mixed topology batches (random vertices and neighbors).""" data, neighbors, sizes = _random_data( batch_size, num_vertices, in_channels, padding=True, only_self_edges=False) u, v, c, w, b = _random_variables(in_channels, out_channels, num_weight_matrices) with self.subTest(name="if_w_is_0_then_y_is_b"): y = gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=sizes, var_u=u, var_v=v, var_c=c, var_w=tf.zeros_like(w), var_b=b) for k in range(batch_size): y_crop = y[k, :sizes[k], :] y_expected = tf.broadcast_to(b, y_crop.shape) self.assertAllEqual(y_crop, y_expected) # Check for zeros in the padded region. self.assertAllEqual(y[k, sizes[k]:, :], tf.zeros((num_vertices - sizes[k], out_channels))) with self.subTest(name="convolve_with_constant"): constant_data = data for k in range(batch_size): constant_data[k, :sizes[k], :] = np.tile(data[k, 0, :], (sizes[k], 1)) y = gc.feature_steered_convolution( data=constant_data, neighbors=neighbors, sizes=sizes, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) for k in range(batch_size): y_crop = y[k, :sizes[k], :] y_const = tf.broadcast_to(y_crop[0, :], y_crop.shape) self.assertAllClose(y_crop, y_const) # Check for zeros in the padded region. self.assertAllEqual(y[k, sizes[k]:, :], tf.zeros([num_vertices - sizes[k], out_channels])) @parameterized.parameters( (1, 10, 3, 1, True), (3, 6, 1, 4, True), (0, 10, 5, 2, False), (1, 10, 3, 1, False), (3, 6, 1, 4, False), (0, 10, 5, 2, False), ) def test_feature_steered_convolution_jacobian_random(self, batch_size, num_vertices, in_channels, num_weight_matrices, padding): """Test the jacobian for random input data.""" random_data = _random_data( batch_size, num_vertices, in_channels, padding, only_self_edges=False, data_type=np.float64, neighbors_type=np.float64) data_init = random_data[0] neighbors = random_data[1] sizes = None if not padding else random_data[2] u, v, c, w, b = _random_variables( in_channels, in_channels, num_weight_matrices, dtype=np.float64) def feature_steered_convolution(data): return gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=sizes, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) self.assert_jacobian_is_correct_fn(feature_steered_convolution, [data_init]) @parameterized.parameters( (1, 1, 0.0), (5, 1, 0.0), (1, 3, 0.0), (5, 3, 0.0), (1, 1, 1.0), (5, 1, 1.0), (1, 3, 1.0), (5, 3, 1.0), ) def test_feature_steered_convolution_jacobian_preset(self, num_vertices, num_channels, data_multiplier): """Test the jacobian is correct for preset inputs.""" # Corner cases include one vertex, one channel, and all-zero features. 
data_init = data_multiplier * np.random.uniform( size=(num_vertices, num_channels)).astype(np.float64) neighbors = tf.sparse.eye(num_vertices, dtype=tf.float64) u, v, c, w, b = _random_variables( num_channels, num_channels, 1, dtype=np.float64) def feature_steered_convolution(data): return gc.feature_steered_convolution( data=data, neighbors=neighbors, sizes=None, var_u=u, var_v=v, var_c=c, var_w=w, var_b=b) self.assert_jacobian_is_correct_fn(feature_steered_convolution, [data_init]) class EdgeConvolutionTemplateTests(test_case.TestCase): def _zeros(self, vertex_features, _, out_dimensions=None): """A callable for `edge_convolution_template`.""" if out_dimensions is None: return tf.zeros_like(vertex_features) else: return tf.zeros( shape=(vertex_features.shape.as_list()[0], out_dimensions), dtype=vertex_features.dtype) def _pass_through(self, _, neighbor_features): """A callable for `edge_convolution_template`.""" return neighbor_features def _circular_2d_data(self, num_vertices, include_normals=False): """Create data for a circle graph.""" # Vertices are points distributed uniformly on a circle, with each point # connected to its closest neighbor on either side. theta = np.linspace(0.0, np.pi * 2.0, num=num_vertices, endpoint=False) data = np.stack((np.cos(theta), np.sin(theta)), axis=-1) if include_normals: data = np.concatenate((data, data), axis=-1) eye = np.eye(num_vertices) neighbors = np.maximum(np.roll(eye, 1, axis=1), np.roll(eye, -1, axis=1)) * 0.5 return data, _dense_to_sparse(neighbors) def _edge_curvature_2d(self, vertex_features, neighbor_features): """A callable for `edge_convolution_template` that computes curvature.""" x_position, x_normal = tf.split( value=vertex_features, num_or_size_splits=2, axis=-1) y_position, y_normal = tf.split( value=neighbor_features, num_or_size_splits=2, axis=-1) yx_diff = x_position - y_position curvature_unscaled = tf.abs( tf.reduce_sum( input_tensor=(y_normal - x_normal) * yx_diff, axis=-1, keepdims=True)) edge_length_squared = tf.reduce_sum( input_tensor=yx_diff * yx_diff, axis=-1, keepdims=True) return tf.where( tf.less(edge_length_squared, 1e-7), tf.zeros_like(edge_length_squared), curvature_unscaled / edge_length_squared) @parameterized.parameters( ("'sizes' must have an integer type.", np.float32, np.float32, np.float32), ("'data' must have a float type.", np.int32, np.float32, np.int32), ("'neighbors' and 'data' must have the same type.", np.float32, np.float64, np.int32), ) def test_edge_convolution_template_exception_raised_types( self, err_msg, data_type, neighbors_type, sizes_type): """Check the type errors for invalid input types.""" data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type, neighbors_type, sizes_type) with self.assertRaisesRegexp(TypeError, err_msg): gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=sizes, edge_function=self._zeros, reduction="weighted", edge_function_kwargs=dict()) @parameterized.parameters( (np.float32, np.float32, np.int32), (np.float64, np.float64, np.int32), (np.float32, np.float32, np.int64), (np.float64, np.float64, np.int64), (np.float64, np.float64, np.int8), (np.float64, np.float64, np.uint8), (np.float64, np.float64, np.int16), (np.float64, np.float64, np.uint16), ) def test_edge_convolution_template_exception_not_raised_types( self, data_type, neighbors_type, sizes_type): """Check there are no exceptions for valid input types.""" data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type, neighbors_type, sizes_type) try: 
gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=sizes, edge_function=self._zeros, reduction="weighted", edge_function_kwargs=dict()) except Exception as e: # pylint: disable=broad-except self.fail("Exception raised: %s" % str(e)) def test_edge_convolution_template_exception_raised_shapes(self): """Check that invalid input shapes trigger the right exceptions.""" with self.assertRaisesRegexp(ValueError, "must have a rank of 2"): data, neighbors = _dummy_data(1, 5, 2) data = data[0, :] _ = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._zeros, reduction="weighted", edge_function_kwargs=dict()) with self.assertRaisesRegexp(ValueError, "must have a rank greater than 1"): data = np.ones(shape=(5), dtype=np.float32) neighbors = _dense_to_sparse(np.ones(shape=(5), dtype=np.float32)) _ = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._zeros, reduction="weighted", edge_function_kwargs=dict()) with self.assertRaisesRegexp(ValueError, "must have a rank of 1"): data, neighbors = _dummy_data(1, 5, 2) _ = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=((1, 1), (1, 1)), edge_function=self._zeros, reduction="weighted", edge_function_kwargs=dict()) @parameterized.parameters("", "invalid") def test_edge_convolution_template_exception_raised_reduction( self, reduction): """Check that an invalid reduction method triggers the exception.""" with self.assertRaisesRegexp(ValueError, "reduction method"): data, neighbors = _dummy_data(1, 5, 2) gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._zeros, reduction=reduction, edge_function_kwargs=dict()) @parameterized.parameters( (1, 1, 1, 1, "weighted"), (4, 2, 3, 6, "weighted"), (0, 1, 1, 1, "max"), (0, 2, 3, 6, "max"), ) def test_edge_convolution_template_output_shape(self, batch_size, num_vertices, in_channels, out_channels, reduction): """Check that the output of convolution has the correct shape.""" data, neighbors = _dummy_data(batch_size, num_vertices, in_channels) y = gc.edge_convolution_template( data, neighbors, None, self._zeros, reduction=reduction, edge_function_kwargs={"out_dimensions": out_channels}) y_shape = y.shape.as_list() with self.subTest(name="out_channels"): self.assertEqual(y_shape[-1], out_channels) with self.subTest(name="shape"): self.assertAllEqual(y_shape[:-1], data.shape[:-1]) def test_edge_convolution_template_zero_neighbors(self): """Check that vertices with no neighbors map to zeros in the output.""" # We can reuse `self._edge_curvature_2d` as the curvature functional. num_vertices = 500 data, neighbors = self._circular_2d_data(num_vertices, include_normals=True) # Interleave the data with rows filled with random data, these rows will # have no neighbors in the adjacency matrix so should map to all zeros in # the output. 
rows_odd = tf.expand_dims( tf.range(start=1, limit=(2 * num_vertices), delta=2), -1) rows_even = tf.expand_dims( tf.range(start=0, limit=(2 * num_vertices + 1), delta=2), -1) data_interleaved = tf.scatter_nd( indices=rows_odd, updates=data, shape=(2 * num_vertices + 1, tf.shape(input=data)[-1])) random_data = tf.random.uniform( shape=(data.shape[0] + 1, data.shape[-1]), dtype=data.dtype) random_interleaved = tf.scatter_nd( indices=rows_even, updates=random_data, shape=(2 * num_vertices + 1, tf.shape(input=data)[-1])) data_interleaved = data_interleaved + random_interleaved neighbors_interleaved_indices = neighbors.indices * 2 + 1 neighbors_interleaved = tf.SparseTensor( indices=neighbors_interleaved_indices, values=neighbors.values, dense_shape=(2 * num_vertices + 1, 2 * num_vertices + 1)) # Convolve the interleaved data. data_curvature = gc.edge_convolution_template( data=data_interleaved, neighbors=neighbors_interleaved, sizes=None, edge_function=self._edge_curvature_2d, reduction="weighted", edge_function_kwargs=dict()) self.assertEqual(data_curvature.shape, (2 * num_vertices + 1, 1)) # The rows corresponding to the original input data measure the curvature. # The curvature at any point on a circle of radius 1 should be 1. # The interleaved rows of random data should map to zeros in the output. self.assertAllClose(data_curvature[1::2, :], np.ones(shape=(num_vertices, 1))) self.assertAllClose(data_curvature[::2, :], np.zeros(shape=(num_vertices + 1, 1))) @parameterized.parameters( (1, 10, 3, True, "weighted"), (3, 6, 1, True, "weighted"), (0, 10, 5, False, "weighted"), (1, 10, 3, False, "max"), (3, 6, 1, False, "max"), (0, 10, 5, False, "max"), ) def test_edge_convolution_template_jacobian_random(self, batch_size, num_vertices, in_channels, padding, reduction): """Test the jacobian for random input data.""" random_data = _random_data( batch_size, num_vertices, in_channels, padding, only_self_edges=False, data_type=np.float64, neighbors_type=np.float64) data_init = random_data[0] neighbors = random_data[1] sizes = None if not padding else random_data[2] def edge_convolution_template(data): return gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=sizes, edge_function=self._pass_through, reduction=reduction, edge_function_kwargs=dict()) self.assert_jacobian_is_correct_fn(edge_convolution_template, [data_init]) def test_edge_convolution_template_preset_max(self): data = np.array(((1, 2), (3, 4), (5, 6), (7, 8)), np.float32) neighbors = np.array( ((0, 1, 0, 1), (0, 0, 1, 0), (1, 1, 1, 0), (0, 0, 1, 1)), np.float32) neighbors = _dense_to_sparse(neighbors) true = np.array(((8, 10), (8, 10), (10, 12), (14, 16)), np.float32) with self.subTest("max_sum"): max_sum = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=lambda x, y: x + y, reduction="max", edge_function_kwargs=dict()) self.assertAllEqual(max_sum, true) with self.subTest("max_sum_scaled"): # Max reduction ignores the weights, so scaling the neighbors weights # should not change the result. 
max_sum_scaled = gc.edge_convolution_template( data=data, neighbors=neighbors * 10.0, sizes=None, edge_function=lambda x, y: x + y, reduction="max", edge_function_kwargs=dict()) self.assertAllEqual(max_sum_scaled, true) @parameterized.parameters( itertools.product((1, 5), (1, 3), (0.0, 1.0), ("weighted", "max"))) def test_edge_convolution_template_jacobian_preset(self, num_vertices, num_channels, data_multiplier, reduction): """Test the jacobian is correct for preset inputs.""" # Corner cases include one vertex, one channel, and all-zero features. data_init = data_multiplier * np.random.uniform( size=(num_vertices, num_channels)).astype(np.float64) neighbors = tf.sparse.eye(num_vertices, dtype=tf.float64) def edge_convolution_template(data): return gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._pass_through, reduction=reduction, edge_function_kwargs=dict()) self.assert_jacobian_is_correct_fn(edge_convolution_template, [data_init]) def test_edge_convolution_template_laplacian_smoothing(self): r"""Test the expected result with laplacian smoothing. Laplacian smoothing for meshes is defined as $$y_i = \frac{1}{|\mathcal{N(i)}|} \sum_{j \in \mathcal{N(i)}} x_j$$ This can be computed using `edge_convolution_template` with `f(x, y)->y`. """ # We can reuse `self._pass_through(x, y)->y` as the smoothing functional. with self.subTest(name="only_self_edges_random"): num_vertices = 500 data = np.random.uniform(size=(num_vertices, 5)) neighbors = tf.sparse.eye(num_vertices, dtype=tf.as_dtype(data.dtype)) data_smoothed = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._pass_through, reduction="weighted", edge_function_kwargs=dict()) self.assertAllEqual(data, data_smoothed) with self.subTest(name="circular_2d"): num_vertices = 500 data, neighbors = self._circular_2d_data(num_vertices) data_smoothed = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._pass_through, reduction="weighted", edge_function_kwargs=dict()) # The smoothed points should have the same direction as the originals. data_smoothed_normalized = tf.nn.l2_normalize(data_smoothed, axis=-1) self.assertAllClose(data, data_smoothed_normalized) def test_edge_convolution_template_curvature(self): r"""Test the expected result with curvature. (Approximate) curvature for meshes is defined as $$\kappa_{v_i} = \frac{1}{|\mathcal{N}(v_i)|} \sum_{v_j \in \mathcal{N}(v_i)} \frac{(\vec{v_i} - \vec{v_j})^T (\vec{n_{v_i}} - \vec{n_{v_j}})} {\left|\vec{v_i}-\vec{v_j}\right|^2} $$ This can be computed using `edge_convolution_template` with $$f(x, y) = (n_x - n_y)^T (x - y) / ||x - y||^2.$$ where $$n_x$$ and $$n_y$$ are the normals at points $$x$$ and $$y$$ respectively. """ # We can reuse `self._edge_curvature_2d` as the curvature functional. num_vertices = 500 data, neighbors = self._circular_2d_data(num_vertices, include_normals=True) data_curvature = gc.edge_convolution_template( data=data, neighbors=neighbors, sizes=None, edge_function=self._edge_curvature_2d, reduction="weighted", edge_function_kwargs=dict()) # The curvature at each point on a circle of radius 1 should be 1. self.assertAllClose(data_curvature, np.ones(shape=(num_vertices, 1))) if __name__ == "__main__": test_case.main()
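The Laplacian smoothing tested above, with y_i defined as the mean of the features of the neighbors of i, reduces to multiplying the data by a row-normalized adjacency matrix. A standalone NumPy sketch of that equivalence on a ring graph (illustrative names, not part of the library):

```python
import numpy as np

num_vertices, num_channels = 6, 3
data = np.random.uniform(size=(num_vertices, num_channels))

# Ring graph: each vertex is connected to its two ring neighbors.
eye = np.eye(num_vertices)
adjacency = np.roll(eye, 1, axis=1) + np.roll(eye, -1, axis=1)

# Row-normalize so each row of weights sums to one; one smoothing step is
# then a plain matrix product: y_i = mean of the neighbor features of i.
weights = adjacency / adjacency.sum(axis=1, keepdims=True)
smoothed = weights @ data

# Equivalent to averaging the two ring neighbors explicitly.
expected = 0.5 * (np.roll(data, 1, axis=0) + np.roll(data, -1, axis=0))
print(np.allclose(smoothed, expected))  # True
```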
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/util/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Util module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=g-import-not-at-top from tensorflow_graphics.util.doc import _import_tfg_docs if _import_tfg_docs(): from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape from tensorflow_graphics.util import test_case from tensorflow_graphics.util import tfg_flags # pylint: enable=g-import-not-at-top # The util modules are not exported. __all__ = []
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Util module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=g-import-not-at-top from tensorflow_graphics.util.doc import _import_tfg_docs if _import_tfg_docs(): from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape from tensorflow_graphics.util import test_case from tensorflow_graphics.util import tfg_flags # pylint: enable=g-import-not-at-top # The util modules are not exported. __all__ = []
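The `__init__.py` above pulls in its submodules only when API documentation is being generated. A minimal sketch of that gating pattern in isolation; the `BUILD_DOCS` flag and `mypackage` name are hypothetical stand-ins for `_import_tfg_docs` and the real package:

```python
import os

def _building_docs():
  """Hypothetical stand-in for the doc-build flag used above."""
  return os.environ.get("BUILD_DOCS", "") == "1"

if _building_docs():
  # Submodules are imported solely so the doc generator can discover them;
  # regular users pay no import cost for utilities they never touch.
  from mypackage.util import shape  # noqa: F401

__all__ = []  # The util modules are intentionally not exported.
```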
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/geometry/transformation/quaternion.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements TensorFlow quaternion utility functions. A quaternion is written as $$q = xi + yj + zk + w$$, where $$i,j,k$$ forms the three bases of the imaginary part. The functions implemented in this file use the Hamilton convention where $$i^2 = j^2 = k^2 = ijk = -1$$. A quaternion is stored in a 4-D vector $$[x, y, z, w]^T$$. More details about Hamiltonian quaternions can be found on [this page.] (https://en.wikipedia.org/wiki/Quaternion) Note: Some of the functions expect normalized quaternions as inputs where $$x^2 + y^2 + z^2 + w^2 = 1$$. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.geometry.transformation import rotation_matrix_3d from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape def _build_quaternion_from_sines_and_cosines(sin_half_angles, cos_half_angles): """Builds a quaternion from sines and cosines of half Euler angles. Note: In the following, A1 to An are optional batch dimensions. Args: sin_half_angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the sine of half Euler angles. cos_half_angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the cosine of half Euler angles. Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a quaternion. """ c1, c2, c3 = tf.unstack(cos_half_angles, axis=-1) s1, s2, s3 = tf.unstack(sin_half_angles, axis=-1) w = c1 * c2 * c3 + s1 * s2 * s3 x = -c1 * s2 * s3 + s1 * c2 * c3 y = c1 * s2 * c3 + s1 * c2 * s3 z = -s1 * s2 * c3 + c1 * c2 * s3 return tf.stack((x, y, z, w), axis=-1) def between_two_vectors_3d(vector1, vector2, name="quaternion_between_two_vectors_3d"): """Computes quaternion over the shortest arc between two vectors. Result quaternion describes shortest geodesic rotation from vector1 to vector2. Note: In the following, A1 to An are optional batch dimensions. Args: vector1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the first vector. vector2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the second vector. name: A name for this op that defaults to "quaternion_between_two_vectors_3d". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `vector1` or `vector2` is not supported. 
""" with tf.name_scope(name): vector1 = tf.convert_to_tensor(value=vector1) vector2 = tf.convert_to_tensor(value=vector2) shape.check_static( tensor=vector1, tensor_name="vector1", has_dim_equals=(-1, 3)) shape.check_static( tensor=vector2, tensor_name="vector2", has_dim_equals=(-1, 3)) shape.compare_batch_dimensions( tensors=(vector1, vector2), last_axes=-2, broadcast_compatible=True) # Make sure that we are dealing with unit vectors. vector1 = tf.nn.l2_normalize(vector1, axis=-1) vector2 = tf.nn.l2_normalize(vector2, axis=-1) cos_theta = vector.dot(vector1, vector2) real_part = 1.0 + cos_theta axis = vector.cross(vector1, vector2) # Compute arbitrary antiparallel axes to rotate around in case of opposite # vectors. x, y, z = tf.split(vector1, (1, 1, 1), axis=-1) x_bigger_z = tf.abs(x) > tf.abs(z) x_bigger_z = tf.concat([x_bigger_z] * 3, axis=-1) antiparallel_axis = tf.where(x_bigger_z, tf.concat((-y, x, tf.zeros_like(z)), axis=-1), tf.concat((tf.zeros_like(x), -z, y), axis=-1)) # Compute rotation between two vectors. is_antiparallel = real_part < 1e-6 is_antiparallel = tf.concat([is_antiparallel] * 4, axis=-1) rot = tf.where( is_antiparallel, tf.concat((antiparallel_axis, tf.zeros_like(real_part)), axis=-1), tf.concat((axis, real_part), axis=-1)) return tf.nn.l2_normalize(rot, axis=-1) def conjugate(quaternion, name="quaternion_conjugate"): """Computes the conjugate of a quaternion. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. name: A name for this op that defaults to "quaternion_conjugate". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `quaternion` is not supported. """ with tf.name_scope(name): quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) quaternion = asserts.assert_normalized(quaternion) xyz, w = tf.split(quaternion, (3, 1), axis=-1) return tf.concat((-xyz, w), axis=-1) def from_axis_angle(axis, angle, name="quaternion_from_axis_angle"): """Converts an axis-angle representation to a quaternion. Note: In the following, A1 to An are optional batch dimensions. Args: axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension represents an angle. name: A name for this op that defaults to "quaternion_from_axis_angle". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `axis` or `angle` is not supported. """ with tf.name_scope(name): axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(axis, angle), last_axes=-2, broadcast_compatible=True) axis = asserts.assert_normalized(axis) half_angle = 0.5 * angle w = tf.cos(half_angle) xyz = tf.sin(half_angle) * axis return tf.concat((xyz, w), axis=-1) def from_euler(angles, name="quaternion_from_euler"): """Converts an Euler angle representation to a quaternion. Note: Uses the z-y-x rotation convention (Tait-Bryan angles). Note: In the following, A1 to An are optional batch dimensions. 
Args: angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the three Euler angles. `[..., 0]` is the angle about `x` in radians, `[..., 1]` is the angle about `y` in radians and `[..., 2]` is the angle about `z` in radians. name: A name for this op that defaults to "quaternion_from_euler". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `angles` is not supported. """ with tf.name_scope(name): angles = tf.convert_to_tensor(value=angles) shape.check_static( tensor=angles, tensor_name="angles", has_dim_equals=(-1, 3)) half_angles = angles / 2.0 cos_half_angles = tf.cos(half_angles) sin_half_angles = tf.sin(half_angles) return _build_quaternion_from_sines_and_cosines(sin_half_angles, cos_half_angles) def from_euler_with_small_angles_approximation(angles, name="quaternion_from_euler"): r"""Converts small Euler angles to quaternions. Under the small angle assumption, $$\sin(x)$$ and $$\cos(x)$$ can be approximated by their second order Taylor expansions, where $$\sin(x) \approx x$$ and $$\cos(x) \approx 1 - \frac{x^2}{2}$$. In the current implementation, the smallness of the angles is not verified. Note: Uses the z-y-x rotation convention (Tait-Bryan angles). Note: In the following, A1 to An are optional batch dimensions. Args: angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the three Euler angles. `[..., 0]` is the angle about `x` in radians, `[..., 1]` is the angle about `y` in radians and `[..., 2]` is the angle about `z` in radians. name: A name for this op that defaults to "quaternion_from_euler". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `angles` is not supported. """ with tf.name_scope(name): angles = tf.convert_to_tensor(value=angles) shape.check_static( tensor=angles, tensor_name="angles", has_dim_equals=(-1, 3)) half_angles = angles / 2.0 cos_half_angles = 1.0 - 0.5 * half_angles * half_angles sin_half_angles = half_angles quaternion = _build_quaternion_from_sines_and_cosines( sin_half_angles, cos_half_angles) # We need to normalize the quaternion due to the small angle approximation. return tf.nn.l2_normalize(quaternion, axis=-1) def from_rotation_matrix(rotation_matrix, name="quaternion_from_rotation_matrix"): """Converts a rotation matrix representation to a quaternion. Warning: This function is not smooth everywhere. Note: In the following, A1 to An are optional batch dimensions. Args: rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the last two dimensions represent a rotation matrix. name: A name for this op that defaults to "quaternion_from_rotation_matrix". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `rotation_matrix` is not supported. """ with tf.name_scope(name): rotation_matrix = tf.convert_to_tensor(value=rotation_matrix) shape.check_static( tensor=rotation_matrix, tensor_name="rotation_matrix", has_rank_greater_than=1, has_dim_equals=((-1, 3), (-2, 3))) rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized( rotation_matrix) trace = tf.linalg.trace(rotation_matrix) eps_addition = asserts.select_eps_for_addition(rotation_matrix.dtype) rows = tf.unstack(rotation_matrix, axis=-2) entries = [tf.unstack(row, axis=-1) for row in rows] def tr_positive(): sq = tf.sqrt(trace + 1.0) * 2. 
# sq = 4 * qw. qw = 0.25 * sq qx = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq) qy = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq) qz = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq) return tf.stack((qx, qy, qz, qw), axis=-1) def cond_1(): sq = tf.sqrt(1.0 + entries[0][0] - entries[1][1] - entries[2][2] + eps_addition) * 2. # sq = 4 * qx. qw = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq) qx = 0.25 * sq qy = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq) qz = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq) return tf.stack((qx, qy, qz, qw), axis=-1) def cond_2(): sq = tf.sqrt(1.0 + entries[1][1] - entries[0][0] - entries[2][2] + eps_addition) * 2. # sq = 4 * qy. qw = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq) qx = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq) qy = 0.25 * sq qz = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq) return tf.stack((qx, qy, qz, qw), axis=-1) def cond_3(): sq = tf.sqrt(1.0 + entries[2][2] - entries[0][0] - entries[1][1] + eps_addition) * 2. # sq = 4 * qz. qw = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq) qx = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq) qy = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq) qz = 0.25 * sq return tf.stack((qx, qy, qz, qw), axis=-1) def cond_idx(cond): cond = tf.expand_dims(cond, -1) cond = tf.tile(cond, [1] * (rotation_matrix.shape.ndims - 2) + [4]) return cond where_2 = tf.where( cond_idx(entries[1][1] > entries[2][2]), cond_2(), cond_3()) where_1 = tf.where( cond_idx((entries[0][0] > entries[1][1]) & (entries[0][0] > entries[2][2])), cond_1(), where_2) quat = tf.where(cond_idx(trace > 0), tr_positive(), where_1) return quat def inverse(quaternion, name="quaternion_inverse"): """Computes the inverse of a quaternion. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. name: A name for this op that defaults to "quaternion_inverse". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `quaternion` is not supported. """ with tf.name_scope(name): quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) quaternion = asserts.assert_normalized(quaternion) squared_norm = tf.reduce_sum( input_tensor=tf.square(quaternion), axis=-1, keepdims=True) return safe_ops.safe_unsigned_div(conjugate(quaternion), squared_norm) def is_normalized(quaternion, atol=1e-3, name="quaternion_is_normalized"): """Determines if quaternion is normalized quaternion or not. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a quaternion. atol: The absolute tolerance parameter. name: A name for this op that defaults to "quaternion_is_normalized". Returns: A tensor of type `bool` and shape `[A1, ..., An, 1]`, where False indicates that the quaternion is not normalized. Raises: ValueError: If the shape of `quaternion` is not supported. 
""" with tf.name_scope(name): quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) norms = tf.norm(tensor=quaternion, axis=-1, keepdims=True) return tf.where( tf.abs(norms - 1.) < atol, tf.ones_like(norms, dtype=bool), tf.zeros_like(norms, dtype=bool)) def normalize(quaternion, eps=1e-12, name="quaternion_normalize"): """Normalizes a quaternion. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a quaternion. eps: A lower bound value for the norm that defaults to 1e-12. name: A name for this op that defaults to "quaternion_normalize". Returns: A N-D tensor of shape `[?, ..., ?, 1]` where the quaternion elements have been normalized. Raises: ValueError: If the shape of `quaternion` is not supported. """ with tf.name_scope(name): quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) return tf.math.l2_normalize(quaternion, axis=-1, epsilon=eps) def multiply(quaternion1, quaternion2, name="quaternion_multiply"): """Multiplies two quaternions. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion1: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a quaternion. quaternion2: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a quaternion. name: A name for this op that defaults to "quaternion_multiply". Returns: A tensor of shape `[A1, ..., An, 4]` representing quaternions. Raises: ValueError: If the shape of `quaternion1` or `quaternion2` is not supported. """ with tf.name_scope(name): quaternion1 = tf.convert_to_tensor(value=quaternion1) quaternion2 = tf.convert_to_tensor(value=quaternion2) shape.check_static( tensor=quaternion1, tensor_name="quaternion1", has_dim_equals=(-1, 4)) shape.check_static( tensor=quaternion2, tensor_name="quaternion2", has_dim_equals=(-1, 4)) x1, y1, z1, w1 = tf.unstack(quaternion1, axis=-1) x2, y2, z2, w2 = tf.unstack(quaternion2, axis=-1) x = x1 * w2 + y1 * z2 - z1 * y2 + w1 * x2 y = -x1 * z2 + y1 * w2 + z1 * x2 + w1 * y2 z = x1 * y2 - y1 * x2 + z1 * w2 + w1 * z2 w = -x1 * x2 - y1 * y2 - z1 * z2 + w1 * w2 return tf.stack((x, y, z, w), axis=-1) def normalized_random_uniform(quaternion_shape, name="quaternion_normalized_random_uniform"): """Random normalized quaternion following a uniform distribution law on SO(3). Args: quaternion_shape: A list representing the shape of the output tensor. name: A name for this op that defaults to "quaternion_normalized_random_uniform". Returns: A tensor of shape `[quaternion_shape[0],...,quaternion_shape[-1], 4]` representing random normalized quaternions. """ with tf.name_scope(name): quaternion_shape = tf.convert_to_tensor( value=quaternion_shape, dtype=tf.int32) quaternion_shape = tf.concat((quaternion_shape, tf.constant([4])), axis=0) random_normal = tf.random.normal(quaternion_shape) return normalize(random_normal) def normalized_random_uniform_initializer(): """Random unit quaternion initializer.""" # Since variable initializers must take `shape` as input, we cannot prevent # a clash between util.shape and the argument here. Therefore we have to # disable redefined-outer-name for this function. # pylint: disable=redefined-outer-name def _initializer(shape, dtype=tf.float32, partition_info=None): """Generate a random normalized quaternion. 
Note: In the following, A1 to An are optional batch dimensions. Args: shape: A list representing the shape of the output. The last entry of the list must be `4`. dtype: type of the output (tf.float32 is the only type supported). partition_info: how the variable is partitioned (not used). Returns: A tensor of shape `[A1, ..., An, 4]` representing normalized quaternions. Raises: ValueError: If `shape` or `dtype` are not supported. """ del partition_info # unused if dtype != tf.float32: raise ValueError("'dtype' must be tf.float32.") if shape[-1] != 4: raise ValueError("Last dimension of 'shape' must be 4.") return normalized_random_uniform(shape[:-1]) return _initializer # pylint: enable=redefined-outer-name def rotate(point, quaternion, name="quaternion_rotate"): """Rotates a point using a quaternion. Note: In the following, A1 to An are optional batch dimensions. Args: point: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a 3d point. quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. name: A name for this op that defaults to "quaternion_rotate". Returns: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a 3d point. Raises: ValueError: If the shape of `point` or `quaternion` is not supported. """ with tf.name_scope(name): point = tf.convert_to_tensor(value=point) quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=point, tensor_name="point", has_dim_equals=(-1, 3)) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) shape.compare_batch_dimensions( tensors=(point, quaternion), last_axes=-2, broadcast_compatible=True) quaternion = asserts.assert_normalized(quaternion) padding = [[0, 0] for _ in range(point.shape.ndims)] padding[-1][-1] = 1 point = tf.pad(tensor=point, paddings=padding, mode="CONSTANT") point = multiply(quaternion, point) point = multiply(point, conjugate(quaternion)) xyz, _ = tf.split(point, (3, 1), axis=-1) return xyz def relative_angle(quaternion1, quaternion2, name="quaternion_relative_angle"): r"""Computes the unsigned relative rotation angle between 2 unit quaternions. Given two normalized quanternions $$\mathbf{q}_1$$ and $$\mathbf{q}_2$$, the relative angle is computed as $$\theta = 2\arccos(\mathbf{q}_1^T\mathbf{q}_2)$$. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion1: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. quaternion2: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. name: A name for this op that defaults to "quaternion_relative_angle". Returns: A tensor of shape `[A1, ..., An, 1]` where the last dimension represents rotation angles in the range [0.0, pi]. Raises: ValueError: If the shape of `quaternion1` or `quaternion2` is not supported. """ with tf.name_scope(name): quaternion1 = tf.convert_to_tensor(value=quaternion1) quaternion2 = tf.convert_to_tensor(value=quaternion2) shape.check_static( tensor=quaternion1, tensor_name="quaternion1", has_dim_equals=(-1, 4)) shape.check_static( tensor=quaternion2, tensor_name="quaternion2", has_dim_equals=(-1, 4)) quaternion1 = asserts.assert_normalized(quaternion1) quaternion2 = asserts.assert_normalized(quaternion2) dot_product = vector.dot(quaternion1, quaternion2, keepdims=False) # Ensure dot product is in range [-1. 1]. 
eps_dot_prod = 4.0 * asserts.select_eps_for_addition(dot_product.dtype) dot_product = safe_ops.safe_shrink( dot_product, -1.0, 1.0, False, eps=eps_dot_prod) return 2.0 * tf.acos(tf.abs(dot_product)) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
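One quick way to sanity-check the Hamilton-convention formulas in `multiply`, `conjugate`, and `rotate` above is a plain NumPy conjugation test: rotating the x-axis by 90 degrees about z should produce the y-axis. A minimal standalone sketch, mirroring the component formulas from the file (illustrative only):

```python
import numpy as np

def quat_multiply(q1, q2):
    # Hamilton product, with quaternions stored as [x, y, z, w].
    x1, y1, z1, w1 = q1
    x2, y2, z2, w2 = q2
    return np.array([
        x1 * w2 + y1 * z2 - z1 * y2 + w1 * x2,
        -x1 * z2 + y1 * w2 + z1 * x2 + w1 * y2,
        x1 * y2 - y1 * x2 + z1 * w2 + w1 * z2,
        -x1 * x2 - y1 * y2 - z1 * z2 + w1 * w2,
    ])

def quat_rotate(point, q):
    # Rotate by conjugation: pad the point to a pure quaternion, then
    # compute q * p * conj(q) (the conjugate inverts q, since q is unit).
    p = np.append(point, 0.0)
    q_conj = np.array([-q[0], -q[1], -q[2], q[3]])
    return quat_multiply(quat_multiply(q, p), q_conj)[:3]

# 90-degree rotation about z: q = [axis * sin(angle/2), cos(angle/2)].
half = 0.25 * np.pi
q = np.array([0.0, 0.0, np.sin(half), np.cos(half)])
print(np.allclose(quat_rotate(np.array([1.0, 0.0, 0.0]), q),
                  [0.0, 1.0, 0.0]))  # True: x-axis maps to y-axis.
```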
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements TensorFlow quaternion utility functions. A quaternion is written as $$q = xi + yj + zk + w$$, where $$i,j,k$$ forms the three bases of the imaginary part. The functions implemented in this file use the Hamilton convention where $$i^2 = j^2 = k^2 = ijk = -1$$. A quaternion is stored in a 4-D vector $$[x, y, z, w]^T$$. More details about Hamiltonian quaternions can be found on [this page.] (https://en.wikipedia.org/wiki/Quaternion) Note: Some of the functions expect normalized quaternions as inputs where $$x^2 + y^2 + z^2 + w^2 = 1$$. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.geometry.transformation import rotation_matrix_3d from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape def _build_quaternion_from_sines_and_cosines(sin_half_angles, cos_half_angles): """Builds a quaternion from sines and cosines of half Euler angles. Note: In the following, A1 to An are optional batch dimensions. Args: sin_half_angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the sine of half Euler angles. cos_half_angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the cosine of half Euler angles. Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a quaternion. """ c1, c2, c3 = tf.unstack(cos_half_angles, axis=-1) s1, s2, s3 = tf.unstack(sin_half_angles, axis=-1) w = c1 * c2 * c3 + s1 * s2 * s3 x = -c1 * s2 * s3 + s1 * c2 * c3 y = c1 * s2 * c3 + s1 * c2 * s3 z = -s1 * s2 * c3 + c1 * c2 * s3 return tf.stack((x, y, z, w), axis=-1) def between_two_vectors_3d(vector1, vector2, name="quaternion_between_two_vectors_3d"): """Computes quaternion over the shortest arc between two vectors. Result quaternion describes shortest geodesic rotation from vector1 to vector2. Note: In the following, A1 to An are optional batch dimensions. Args: vector1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the first vector. vector2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the second vector. name: A name for this op that defaults to "quaternion_between_two_vectors_3d". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `vector1` or `vector2` is not supported. 
""" with tf.name_scope(name): vector1 = tf.convert_to_tensor(value=vector1) vector2 = tf.convert_to_tensor(value=vector2) shape.check_static( tensor=vector1, tensor_name="vector1", has_dim_equals=(-1, 3)) shape.check_static( tensor=vector2, tensor_name="vector2", has_dim_equals=(-1, 3)) shape.compare_batch_dimensions( tensors=(vector1, vector2), last_axes=-2, broadcast_compatible=True) # Make sure that we are dealing with unit vectors. vector1 = tf.nn.l2_normalize(vector1, axis=-1) vector2 = tf.nn.l2_normalize(vector2, axis=-1) cos_theta = vector.dot(vector1, vector2) real_part = 1.0 + cos_theta axis = vector.cross(vector1, vector2) # Compute arbitrary antiparallel axes to rotate around in case of opposite # vectors. x, y, z = tf.split(vector1, (1, 1, 1), axis=-1) x_bigger_z = tf.abs(x) > tf.abs(z) x_bigger_z = tf.concat([x_bigger_z] * 3, axis=-1) antiparallel_axis = tf.where(x_bigger_z, tf.concat((-y, x, tf.zeros_like(z)), axis=-1), tf.concat((tf.zeros_like(x), -z, y), axis=-1)) # Compute rotation between two vectors. is_antiparallel = real_part < 1e-6 is_antiparallel = tf.concat([is_antiparallel] * 4, axis=-1) rot = tf.where( is_antiparallel, tf.concat((antiparallel_axis, tf.zeros_like(real_part)), axis=-1), tf.concat((axis, real_part), axis=-1)) return tf.nn.l2_normalize(rot, axis=-1) def conjugate(quaternion, name="quaternion_conjugate"): """Computes the conjugate of a quaternion. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. name: A name for this op that defaults to "quaternion_conjugate". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `quaternion` is not supported. """ with tf.name_scope(name): quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) quaternion = asserts.assert_normalized(quaternion) xyz, w = tf.split(quaternion, (3, 1), axis=-1) return tf.concat((-xyz, w), axis=-1) def from_axis_angle(axis, angle, name="quaternion_from_axis_angle"): """Converts an axis-angle representation to a quaternion. Note: In the following, A1 to An are optional batch dimensions. Args: axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension represents an angle. name: A name for this op that defaults to "quaternion_from_axis_angle". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `axis` or `angle` is not supported. """ with tf.name_scope(name): axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(axis, angle), last_axes=-2, broadcast_compatible=True) axis = asserts.assert_normalized(axis) half_angle = 0.5 * angle w = tf.cos(half_angle) xyz = tf.sin(half_angle) * axis return tf.concat((xyz, w), axis=-1) def from_euler(angles, name="quaternion_from_euler"): """Converts an Euler angle representation to a quaternion. Note: Uses the z-y-x rotation convention (Tait-Bryan angles). Note: In the following, A1 to An are optional batch dimensions. 
Args: angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the three Euler angles. `[..., 0]` is the angle about `x` in radians, `[..., 1]` is the angle about `y` in radians and `[..., 2]` is the angle about `z` in radians. name: A name for this op that defaults to "quaternion_from_euler". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `angles` is not supported. """ with tf.name_scope(name): angles = tf.convert_to_tensor(value=angles) shape.check_static( tensor=angles, tensor_name="angles", has_dim_equals=(-1, 3)) half_angles = angles / 2.0 cos_half_angles = tf.cos(half_angles) sin_half_angles = tf.sin(half_angles) return _build_quaternion_from_sines_and_cosines(sin_half_angles, cos_half_angles) def from_euler_with_small_angles_approximation(angles, name="quaternion_from_euler"): r"""Converts small Euler angles to quaternions. Under the small angle assumption, $$\sin(x)$$ and $$\cos(x)$$ can be approximated by their second order Taylor expansions, where $$\sin(x) \approx x$$ and $$\cos(x) \approx 1 - \frac{x^2}{2}$$. In the current implementation, the smallness of the angles is not verified. Note: Uses the z-y-x rotation convention (Tait-Bryan angles). Note: In the following, A1 to An are optional batch dimensions. Args: angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the three Euler angles. `[..., 0]` is the angle about `x` in radians, `[..., 1]` is the angle about `y` in radians and `[..., 2]` is the angle about `z` in radians. name: A name for this op that defaults to "quaternion_from_euler". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `angles` is not supported. """ with tf.name_scope(name): angles = tf.convert_to_tensor(value=angles) shape.check_static( tensor=angles, tensor_name="angles", has_dim_equals=(-1, 3)) half_angles = angles / 2.0 cos_half_angles = 1.0 - 0.5 * half_angles * half_angles sin_half_angles = half_angles quaternion = _build_quaternion_from_sines_and_cosines( sin_half_angles, cos_half_angles) # We need to normalize the quaternion due to the small angle approximation. return tf.nn.l2_normalize(quaternion, axis=-1) def from_rotation_matrix(rotation_matrix, name="quaternion_from_rotation_matrix"): """Converts a rotation matrix representation to a quaternion. Warning: This function is not smooth everywhere. Note: In the following, A1 to An are optional batch dimensions. Args: rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the last two dimensions represent a rotation matrix. name: A name for this op that defaults to "quaternion_from_rotation_matrix". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `rotation_matrix` is not supported. """ with tf.name_scope(name): rotation_matrix = tf.convert_to_tensor(value=rotation_matrix) shape.check_static( tensor=rotation_matrix, tensor_name="rotation_matrix", has_rank_greater_than=1, has_dim_equals=((-1, 3), (-2, 3))) rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized( rotation_matrix) trace = tf.linalg.trace(rotation_matrix) eps_addition = asserts.select_eps_for_addition(rotation_matrix.dtype) rows = tf.unstack(rotation_matrix, axis=-2) entries = [tf.unstack(row, axis=-1) for row in rows] def tr_positive(): sq = tf.sqrt(trace + 1.0) * 2. 
# sq = 4 * qw. qw = 0.25 * sq qx = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq) qy = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq) qz = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq) return tf.stack((qx, qy, qz, qw), axis=-1) def cond_1(): sq = tf.sqrt(1.0 + entries[0][0] - entries[1][1] - entries[2][2] + eps_addition) * 2. # sq = 4 * qx. qw = safe_ops.safe_unsigned_div(entries[2][1] - entries[1][2], sq) qx = 0.25 * sq qy = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq) qz = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq) return tf.stack((qx, qy, qz, qw), axis=-1) def cond_2(): sq = tf.sqrt(1.0 + entries[1][1] - entries[0][0] - entries[2][2] + eps_addition) * 2. # sq = 4 * qy. qw = safe_ops.safe_unsigned_div(entries[0][2] - entries[2][0], sq) qx = safe_ops.safe_unsigned_div(entries[0][1] + entries[1][0], sq) qy = 0.25 * sq qz = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq) return tf.stack((qx, qy, qz, qw), axis=-1) def cond_3(): sq = tf.sqrt(1.0 + entries[2][2] - entries[0][0] - entries[1][1] + eps_addition) * 2. # sq = 4 * qz. qw = safe_ops.safe_unsigned_div(entries[1][0] - entries[0][1], sq) qx = safe_ops.safe_unsigned_div(entries[0][2] + entries[2][0], sq) qy = safe_ops.safe_unsigned_div(entries[1][2] + entries[2][1], sq) qz = 0.25 * sq return tf.stack((qx, qy, qz, qw), axis=-1) def cond_idx(cond): cond = tf.expand_dims(cond, -1) cond = tf.tile(cond, [1] * (rotation_matrix.shape.ndims - 2) + [4]) return cond where_2 = tf.where( cond_idx(entries[1][1] > entries[2][2]), cond_2(), cond_3()) where_1 = tf.where( cond_idx((entries[0][0] > entries[1][1]) & (entries[0][0] > entries[2][2])), cond_1(), where_2) quat = tf.where(cond_idx(trace > 0), tr_positive(), where_1) return quat def inverse(quaternion, name="quaternion_inverse"): """Computes the inverse of a quaternion. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. name: A name for this op that defaults to "quaternion_inverse". Returns: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. Raises: ValueError: If the shape of `quaternion` is not supported. """ with tf.name_scope(name): quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) quaternion = asserts.assert_normalized(quaternion) squared_norm = tf.reduce_sum( input_tensor=tf.square(quaternion), axis=-1, keepdims=True) return safe_ops.safe_unsigned_div(conjugate(quaternion), squared_norm) def is_normalized(quaternion, atol=1e-3, name="quaternion_is_normalized"): """Determines if a quaternion is normalized or not. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a quaternion. atol: The absolute tolerance parameter. name: A name for this op that defaults to "quaternion_is_normalized". Returns: A tensor of type `bool` and shape `[A1, ..., An, 1]`, where False indicates that the quaternion is not normalized. Raises: ValueError: If the shape of `quaternion` is not supported.
""" with tf.name_scope(name): quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) norms = tf.norm(tensor=quaternion, axis=-1, keepdims=True) return tf.where( tf.abs(norms - 1.) < atol, tf.ones_like(norms, dtype=bool), tf.zeros_like(norms, dtype=bool)) def normalize(quaternion, eps=1e-12, name="quaternion_normalize"): """Normalizes a quaternion. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a quaternion. eps: A lower bound value for the norm that defaults to 1e-12. name: A name for this op that defaults to "quaternion_normalize". Returns: A N-D tensor of shape `[?, ..., ?, 1]` where the quaternion elements have been normalized. Raises: ValueError: If the shape of `quaternion` is not supported. """ with tf.name_scope(name): quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) return tf.math.l2_normalize(quaternion, axis=-1, epsilon=eps) def multiply(quaternion1, quaternion2, name="quaternion_multiply"): """Multiplies two quaternions. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion1: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a quaternion. quaternion2: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a quaternion. name: A name for this op that defaults to "quaternion_multiply". Returns: A tensor of shape `[A1, ..., An, 4]` representing quaternions. Raises: ValueError: If the shape of `quaternion1` or `quaternion2` is not supported. """ with tf.name_scope(name): quaternion1 = tf.convert_to_tensor(value=quaternion1) quaternion2 = tf.convert_to_tensor(value=quaternion2) shape.check_static( tensor=quaternion1, tensor_name="quaternion1", has_dim_equals=(-1, 4)) shape.check_static( tensor=quaternion2, tensor_name="quaternion2", has_dim_equals=(-1, 4)) x1, y1, z1, w1 = tf.unstack(quaternion1, axis=-1) x2, y2, z2, w2 = tf.unstack(quaternion2, axis=-1) x = x1 * w2 + y1 * z2 - z1 * y2 + w1 * x2 y = -x1 * z2 + y1 * w2 + z1 * x2 + w1 * y2 z = x1 * y2 - y1 * x2 + z1 * w2 + w1 * z2 w = -x1 * x2 - y1 * y2 - z1 * z2 + w1 * w2 return tf.stack((x, y, z, w), axis=-1) def normalized_random_uniform(quaternion_shape, name="quaternion_normalized_random_uniform"): """Random normalized quaternion following a uniform distribution law on SO(3). Args: quaternion_shape: A list representing the shape of the output tensor. name: A name for this op that defaults to "quaternion_normalized_random_uniform". Returns: A tensor of shape `[quaternion_shape[0],...,quaternion_shape[-1], 4]` representing random normalized quaternions. """ with tf.name_scope(name): quaternion_shape = tf.convert_to_tensor( value=quaternion_shape, dtype=tf.int32) quaternion_shape = tf.concat((quaternion_shape, tf.constant([4])), axis=0) random_normal = tf.random.normal(quaternion_shape) return normalize(random_normal) def normalized_random_uniform_initializer(): """Random unit quaternion initializer.""" # Since variable initializers must take `shape` as input, we cannot prevent # a clash between util.shape and the argument here. Therefore we have to # disable redefined-outer-name for this function. # pylint: disable=redefined-outer-name def _initializer(shape, dtype=tf.float32, partition_info=None): """Generate a random normalized quaternion. 
Note: In the following, A1 to An are optional batch dimensions. Args: shape: A list representing the shape of the output. The last entry of the list must be `4`. dtype: type of the output (tf.float32 is the only type supported). partition_info: how the variable is partitioned (not used). Returns: A tensor of shape `[A1, ..., An, 4]` representing normalized quaternions. Raises: ValueError: If `shape` or `dtype` are not supported. """ del partition_info # unused if dtype != tf.float32: raise ValueError("'dtype' must be tf.float32.") if shape[-1] != 4: raise ValueError("Last dimension of 'shape' must be 4.") return normalized_random_uniform(shape[:-1]) return _initializer # pylint: enable=redefined-outer-name def rotate(point, quaternion, name="quaternion_rotate"): """Rotates a point using a quaternion. Note: In the following, A1 to An are optional batch dimensions. Args: point: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a 3d point. quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. name: A name for this op that defaults to "quaternion_rotate". Returns: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a 3d point. Raises: ValueError: If the shape of `point` or `quaternion` is not supported. """ with tf.name_scope(name): point = tf.convert_to_tensor(value=point) quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=point, tensor_name="point", has_dim_equals=(-1, 3)) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) shape.compare_batch_dimensions( tensors=(point, quaternion), last_axes=-2, broadcast_compatible=True) quaternion = asserts.assert_normalized(quaternion) padding = [[0, 0] for _ in range(point.shape.ndims)] padding[-1][-1] = 1 point = tf.pad(tensor=point, paddings=padding, mode="CONSTANT") point = multiply(quaternion, point) point = multiply(point, conjugate(quaternion)) xyz, _ = tf.split(point, (3, 1), axis=-1) return xyz def relative_angle(quaternion1, quaternion2, name="quaternion_relative_angle"): r"""Computes the unsigned relative rotation angle between 2 unit quaternions. Given two normalized quaternions $$\mathbf{q}_1$$ and $$\mathbf{q}_2$$, the relative angle is computed as $$\theta = 2\arccos(\mathbf{q}_1^T\mathbf{q}_2)$$. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion1: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. quaternion2: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. name: A name for this op that defaults to "quaternion_relative_angle". Returns: A tensor of shape `[A1, ..., An, 1]` where the last dimension represents rotation angles in the range [0.0, pi]. Raises: ValueError: If the shape of `quaternion1` or `quaternion2` is not supported. """ with tf.name_scope(name): quaternion1 = tf.convert_to_tensor(value=quaternion1) quaternion2 = tf.convert_to_tensor(value=quaternion2) shape.check_static( tensor=quaternion1, tensor_name="quaternion1", has_dim_equals=(-1, 4)) shape.check_static( tensor=quaternion2, tensor_name="quaternion2", has_dim_equals=(-1, 4)) quaternion1 = asserts.assert_normalized(quaternion1) quaternion2 = asserts.assert_normalized(quaternion2) dot_product = vector.dot(quaternion1, quaternion2, keepdims=False) # Ensure dot product is in range [-1, 1].
eps_dot_prod = 4.0 * asserts.select_eps_for_addition(dot_product.dtype) dot_product = safe_ops.safe_shrink( dot_product, -1.0, 1.0, False, eps=eps_dot_prod) return 2.0 * tf.acos(tf.abs(dot_product)) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
-1
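The `quaternion` record above is dense, so a short usage sketch may make it easier to scan. This snippet is an illustration only, not part of the dataset record; it assumes a working `tensorflow_graphics` install and exercises `from_euler`, `rotate`, and `relative_angle` from the module shown above.

```python
# Illustrative sketch (not part of the dataset record) exercising the
# quaternion module above. Assumes tensorflow_graphics is installed.
import numpy as np
import tensorflow as tf
from tensorflow_graphics.geometry.transformation import quaternion

# Two Euler-angle triples (radians): identity, and 90 degrees about z.
angles = tf.constant([[0.0, 0.0, 0.0],
                      [0.0, 0.0, np.pi / 2.0]])
quat = quaternion.from_euler(angles)  # Shape [2, 4], normalized.

# Rotating the x axis: the second row comes out as ~[0, 1, 0].
points = tf.constant([[1.0, 0.0, 0.0],
                      [1.0, 0.0, 0.0]])
rotated = quaternion.rotate(points, quat)

# Unsigned relative angle between the two rotations: ~pi / 2.
theta = quaternion.relative_angle(quat[0:1], quat[1:2])
```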
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/geometry/representation/point.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tensorflow point utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def distance_to_ray(point, origin, direction, keepdims=True, name="point_distance_to_ray"): """Computes the distance from an M-d point to an M-d ray. Note: In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: point: A tensor of shape `[A1, ..., An, M]`. origin: A tensor of shape `[A1, ..., An, M]`. direction: A tensor of shape `[A1, ..., An, M]`. The last dimension must be normalized. keepdims: A `bool`, whether to keep the last dimension with length 1 or to remove it. name: A name for this op. Defaults to "point_distance_to_ray". Returns: A tensor of shape `[A1, ..., An, 1]` containing the distance from each point to the corresponding ray. Raises: ValueError: If the shape of `point`, `origin`, or `direction` is not supported. """ with tf.name_scope(name): point = tf.convert_to_tensor(value=point) origin = tf.convert_to_tensor(value=origin) direction = tf.convert_to_tensor(value=direction) shape.compare_dimensions((point, origin, direction), -1, ("point", "origin", "direction")) shape.compare_batch_dimensions( tensors=(point, origin, direction), last_axes=-2, broadcast_compatible=True) direction = asserts.assert_normalized(direction) vec = point - origin dot = vector.dot(vec, direction) vec -= dot * direction return tf.norm(tensor=vec, axis=-1, keepdims=keepdims) def project_to_ray(point, origin, direction, name="point_project_to_ray"): """Computes the projection of an M-d point on an M-d ray. Note: In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: point: A tensor of shape `[A1, ..., An, M]`. origin: A tensor of shape `[A1, ..., An, M]`. direction: A tensor of shape `[A1, ..., An, M]`. The last dimension must be normalized. name: A name for this op. Defaults to "point_project_to_ray". Returns: A tensor of shape `[A1, ..., An, M]` containing the projected point. Raises: ValueError: If the shape of `point`, `origin`, or `direction` is not supported. """ with tf.name_scope(name): point = tf.convert_to_tensor(value=point) origin = tf.convert_to_tensor(value=origin) direction = tf.convert_to_tensor(value=direction) shape.compare_dimensions((point, origin, direction), -1, ("point", "origin", "direction")) shape.compare_batch_dimensions( tensors=(point, origin, direction), last_axes=-2, broadcast_compatible=True) direction = asserts.assert_normalized(direction) vec = point - origin dot = vector.dot(vec, direction) return origin + dot * direction # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tensorflow point utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def distance_to_ray(point, origin, direction, keepdims=True, name="point_distance_to_ray"): """Computes the distance from an M-d point to an M-d ray. Note: In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: point: A tensor of shape `[A1, ..., An, M]`. origin: A tensor of shape `[A1, ..., An, M]`. direction: A tensor of shape `[A1, ..., An, M]`. The last dimension must be normalized. keepdims: A `bool`, whether to keep the last dimension with length 1 or to remove it. name: A name for this op. Defaults to "point_distance_to_ray". Returns: A tensor of shape `[A1, ..., An, 1]` containing the distance from each point to the corresponding ray. Raises: ValueError: If the shape of `point`, `origin`, or `direction` is not supported. """ with tf.name_scope(name): point = tf.convert_to_tensor(value=point) origin = tf.convert_to_tensor(value=origin) direction = tf.convert_to_tensor(value=direction) shape.compare_dimensions((point, origin, direction), -1, ("point", "origin", "direction")) shape.compare_batch_dimensions( tensors=(point, origin, direction), last_axes=-2, broadcast_compatible=True) direction = asserts.assert_normalized(direction) vec = point - origin dot = vector.dot(vec, direction) vec -= dot * direction return tf.norm(tensor=vec, axis=-1, keepdims=keepdims) def project_to_ray(point, origin, direction, name="point_project_to_ray"): """Computes the projection of an M-d point on an M-d ray. Note: In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: point: A tensor of shape `[A1, ..., An, M]`. origin: A tensor of shape `[A1, ..., An, M]`. direction: A tensor of shape `[A1, ..., An, M]`. The last dimension must be normalized. name: A name for this op. Defaults to "point_project_to_ray". Returns: A tensor of shape `[A1, ..., An, M]` containing the projected point. Raises: ValueError: If the shape of `point`, `origin`, or `direction` is not supported. """ with tf.name_scope(name): point = tf.convert_to_tensor(value=point) origin = tf.convert_to_tensor(value=origin) direction = tf.convert_to_tensor(value=direction) shape.compare_dimensions((point, origin, direction), -1, ("point", "origin", "direction")) shape.compare_batch_dimensions( tensors=(point, origin, direction), last_axes=-2, broadcast_compatible=True) direction = asserts.assert_normalized(direction) vec = point - origin dot = vector.dot(vec, direction) return origin + dot * direction # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
-1
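A similarly hedged sketch of the `point` utilities in the record above (illustrative, not part of the record; assumes `tensorflow_graphics` is installed):

```python
# Illustrative sketch (not part of the dataset record) for the point module
# above. Assumes tensorflow_graphics is installed.
import tensorflow as tf
from tensorflow_graphics.geometry.representation import point

p = tf.constant([1.0, 2.0, 0.0])          # A 3d point.
origin = tf.constant([0.0, 0.0, 0.0])     # Ray through the origin,
direction = tf.constant([1.0, 0.0, 0.0])  # along x; must be normalized.

distance = point.distance_to_ray(p, origin, direction)   # ~[2.0]
projection = point.project_to_ray(p, origin, direction)  # ~[1.0, 0.0, 0.0]
```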
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/util/tests/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/datasets/pix3d/fakes/model/bed/IKEA_MALM_2/3d_keypoints.txt
-0.286364 -0.032406 -0.383607 -0.286361 -0.139994 -0.384352 0.286633 -0.032366 -0.383598 0.286637 -0.138093 -0.384767 0.286443 -0.032130 0.364876 -0.286424 -0.032754 0.364012 -0.286583 0.139071 0.384867 0.284924 0.137388 0.385138 0.284489 -0.140542 0.383167 -0.282070 -0.140273 0.385144
-0.286364 -0.032406 -0.383607 -0.286361 -0.139994 -0.384352 0.286633 -0.032366 -0.383598 0.286637 -0.138093 -0.384767 0.286443 -0.032130 0.364876 -0.286424 -0.032754 0.364012 -0.286583 0.139071 0.384867 0.284924 0.137388 0.385138 0.284489 -0.140542 0.383167 -0.282070 -0.140273 0.385144
-1
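The fake Pix3D record above is just ten whitespace-separated 3d keypoints. A minimal parsing sketch follows; it is illustrative only, and the short relative path stands in for the record's full filepath.

```python
# Illustrative sketch (not part of the dataset record): load a whitespace-
# separated keypoint file like the fake Pix3D record above into an [N, 3]
# array. The short path below stands in for the record's full filepath.
import numpy as np

keypoints = np.loadtxt("3d_keypoints.txt").reshape(-1, 3)  # Shape (10, 3).
```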
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/geometry/representation/grid.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tensorflow grid utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def _grid(starts, stops, nums): """Generates an M-D uniform axis-aligned grid. Warning: This op is not differentiable. Indeed, the gradients of tf.linspace and tf.meshgrid are currently not defined. Args: starts: A tensor of shape `[M]` representing the start points for each dimension. stops: A tensor of shape `[M]` representing the end points for each dimension. nums: A tensor of shape `[M]` representing the number of subdivisions for each dimension. Returns: A tensor of shape `[nums[0], ..., nums[M-1], M]` containing an M-D uniform grid. """ params = [tf.unstack(tensor) for tensor in [starts, stops, nums]] layout = [tf.linspace(*param) for param in zip(*params)] return tf.stack(tf.meshgrid(*layout, indexing="ij"), axis=-1) def generate(starts, stops, nums, name="grid_generate"): r"""Generates an M-D uniform axis-aligned grid. Warning: This op is not differentiable. Indeed, the gradients of tf.linspace and tf.meshgrid are currently not defined. Note: In the following, `B` is an optional batch dimension. Args: starts: A tensor of shape `[M]` or `[B, M]`, where the last dimension represents an M-D start point. stops: A tensor of shape `[M]` or `[B, M]`, where the last dimension represents an M-D end point. nums: A tensor of shape `[M]` representing the number of subdivisions for each dimension. name: A name for this op. Defaults to "grid_generate". Returns: A tensor of shape `[nums[0], ..., nums[M-1], M]` containing an M-D uniform grid or a tensor of shape `[B, nums[0], ..., nums[M-1], M]` containing B M-D uniform grids. Please refer to the example below for more details. Raises: ValueError: If the shape of `starts`, `stops`, or `nums` is not supported. Examples: ```python print(generate((-1.0, -2.0), (1.0, 2.0), (3, 5))) >>> [[[-1. -2.] [-1. -1.] [-1. 0.] [-1. 1.] [-1. 2.]] [[ 0. -2.] [ 0. -1.] [ 0. 0.] [ 0. 1.] [ 0. 2.]] [[ 1. -2.] [ 1. -1.] [ 1. 0.] [ 1. 1.] [ 1. 2.]]] ``` Generates a 3x5 2d grid from -1.0 to 1.0 with 3 subdivisions for the x axis and from -2.0 to 2.0 with 5 subdivisions for the y axis. This leads to a tensor of shape (3, 5, 2).
""" with tf.name_scope(name): starts = tf.convert_to_tensor(value=starts) stops = tf.convert_to_tensor(value=stops) nums = tf.convert_to_tensor(value=nums) shape.check_static( tensor=starts, tensor_name="starts", has_rank_greater_than=0, has_rank_less_than=3) shape.check_static( tensor=stops, tensor_name="stops", has_rank_greater_than=0, has_rank_less_than=3) shape.check_static(tensor=nums, tensor_name="nums", has_rank=1) shape.compare_batch_dimensions( tensors=(starts, stops), last_axes=(-1, -1), broadcast_compatible=False) shape.compare_dimensions((starts, stops, nums), -1, ("starts", "stops", "nums")) if starts.shape.ndims == 1: return _grid(starts, stops, nums) else: return tf.stack([ _grid(starts, stops, nums) for starts, stops in zip(tf.unstack(starts), tf.unstack(stops)) ]) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tensorflow grid utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def _grid(starts, stops, nums): """Generates an M-D uniform axis-aligned grid. Warning: This op is not differentiable. Indeed, the gradients of tf.linspace and tf.meshgrid are currently not defined. Args: starts: A tensor of shape `[M]` representing the start points for each dimension. stops: A tensor of shape `[M]` representing the end points for each dimension. nums: A tensor of shape `[M]` representing the number of subdivisions for each dimension. Returns: A tensor of shape `[nums[0], ..., nums[M-1], M]` containing an M-D uniform grid. """ params = [tf.unstack(tensor) for tensor in [starts, stops, nums]] layout = [tf.linspace(*param) for param in zip(*params)] return tf.stack(tf.meshgrid(*layout, indexing="ij"), axis=-1) def generate(starts, stops, nums, name="grid_generate"): r"""Generates an M-D uniform axis-aligned grid. Warning: This op is not differentiable. Indeed, the gradients of tf.linspace and tf.meshgrid are currently not defined. Note: In the following, `B` is an optional batch dimension. Args: starts: A tensor of shape `[M]` or `[B, M]`, where the last dimension represents an M-D start point. stops: A tensor of shape `[M]` or `[B, M]`, where the last dimension represents an M-D end point. nums: A tensor of shape `[M]` representing the number of subdivisions for each dimension. name: A name for this op. Defaults to "grid_generate". Returns: A tensor of shape `[nums[0], ..., nums[M-1], M]` containing an M-D uniform grid or a tensor of shape `[B, nums[0], ..., nums[M-1], M]` containing B M-D uniform grids. Please refer to the example below for more details. Raises: ValueError: If the shape of `starts`, `stops`, or `nums` is not supported. Examples: ```python print(generate((-1.0, -2.0), (1.0, 2.0), (3, 5))) >>> [[[-1. -2.] [-1. -1.] [-1. 0.] [-1. 1.] [-1. 2.]] [[ 0. -2.] [ 0. -1.] [ 0. 0.] [ 0. 1.] [ 0. 2.]] [[ 1. -2.] [ 1. -1.] [ 1. 0.] [ 1. 1.] [ 1. 2.]]] ``` Generates a 3x5 2d grid from -1.0 to 1.0 with 3 subdivisions for the x axis and from -2.0 to 2.0 with 5 subdivisions for the y axis. This leads to a tensor of shape (3, 5, 2).
""" with tf.name_scope(name): starts = tf.convert_to_tensor(value=starts) stops = tf.convert_to_tensor(value=stops) nums = tf.convert_to_tensor(value=nums) shape.check_static( tensor=starts, tensor_name="starts", has_rank_greater_than=0, has_rank_less_than=3) shape.check_static( tensor=stops, tensor_name="stops", has_rank_greater_than=0, has_rank_less_than=3) shape.check_static(tensor=nums, tensor_name="nums", has_rank=1) shape.compare_batch_dimensions( tensors=(starts, stops), last_axes=(-1, -1), broadcast_compatible=False) shape.compare_dimensions((starts, stops, nums), -1, ("starts", "stops", "nums")) if starts.shape.ndims == 1: return _grid(starts, stops, nums) else: return tf.stack([ _grid(starts, stops, nums) for starts, stops in zip(tf.unstack(starts), tf.unstack(stops)) ]) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
-1
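A hedged sketch of `grid.generate` from the record above, covering both the single and the batched call its docstring describes (illustrative, not part of the record):

```python
# Illustrative sketch (not part of the dataset record) for grid.generate
# above. Assumes tensorflow_graphics is installed.
import tensorflow as tf
from tensorflow_graphics.geometry.representation import grid

# Single 2d grid: x in [-1, 1] with 3 subdivisions, y in [-2, 2] with 5.
single = grid.generate((-1.0, -2.0), (1.0, 2.0), (3, 5))  # Shape [3, 5, 2].

# Batched form: one grid per [B, M] row of start/stop points.
starts = tf.constant([[-1.0, -2.0], [0.0, 0.0]])
stops = tf.constant([[1.0, 2.0], [1.0, 1.0]])
batched = grid.generate(starts, stops, (3, 5))  # Shape [2, 3, 5, 2].
```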
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/rendering/opengl/cleanup.h
/* Copyright 2020 The TensorFlow Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_CLEANUP_H_ #define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_CLEANUP_H_ #include <type_traits> #include <utility> // A move-only RAII object that calls a stored cleanup functor when // destroyed. Cleanup<F> is the return type of MakeCleanup(F). template <typename F> class Cleanup { public: Cleanup() : released_(true), f_() {} template <typename G> explicit Cleanup(G&& f) // NOLINT : f_(std::forward<G>(f)) {} // NOLINT(build/c++11) Cleanup(Cleanup&& src) // NOLINT : released_(src.is_released()), f_(src.release()) { } // Implicitly move-constructible from any compatible Cleanup<G>. // The source will be released as if src.release() were called. // A moved-from Cleanup can be safely destroyed or reassigned. template <typename G> Cleanup(Cleanup<G>&& src) // NOLINT : released_(src.is_released()), f_(src.release()) { } // Assignment to a Cleanup object behaves like destroying it // and making a new one in its place, analogous to unique_ptr // semantics. Cleanup& operator=(Cleanup&& src) { // NOLINT if (!released_) std::move(f_)(); released_ = src.released_; f_ = src.release(); return *this; } ~Cleanup() { if (!released_) std::move(f_)(); } // Releases the cleanup function instead of running it. // Hint: use c.release()() to run early. F release() { released_ = true; return std::move(f_); } bool is_released() const { return released_; } private: static_assert(!std::is_reference<F>::value, "F must not be a reference"); bool released_ = false; F f_; }; // MakeCleanup(f) returns an RAII cleanup object that calls 'f' in its // destructor. The easiest way to use MakeCleanup is with a lambda argument, // capturing the return value in an 'auto' local variable. Most users will not // need more sophisticated syntax than that. // // Example: // void func() { // FILE* fp = fopen("data.txt", "r"); // if (fp == nullptr) return; // auto fp_cleaner = gtl::MakeCleanup([fp] { fclose(fp); }); // // No matter what, fclose(fp) will happen. // DataObject d; // while (ReadDataObject(fp, &d)) { // if (d.IsBad()) { // LOG(ERROR) << "Bad Data"; // return; // } // PushGoodData(d); // } // } // // You can use Cleanup<F> directly, instead of using MakeCleanup and auto, // but there's rarely a reason to do that. // // You can call 'release()' on a Cleanup object to cancel the cleanup. template <int&... ExplicitParameterBarrier, typename F, typename DecayF = typename std::decay<F>::type> Cleanup<DecayF> MakeCleanup(F&& f) { return Cleanup<DecayF>(std::forward<F>(f)); } #endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_CLEANUP_H_
/* Copyright 2020 The TensorFlow Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_CLEANUP_H_ #define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_CLEANUP_H_ #include <type_traits> #include <utility> // A move-only RAII object that calls a stored cleanup functor when // destroyed. Cleanup<F> is the return type of MakeCleanup(F). template <typename F> class Cleanup { public: Cleanup() : released_(true), f_() {} template <typename G> explicit Cleanup(G&& f) // NOLINT : f_(std::forward<G>(f)) {} // NOLINT(build/c++11) Cleanup(Cleanup&& src) // NOLINT : released_(src.is_released()), f_(src.release()) { } // Implicitly move-constructible from any compatible Cleanup<G>. // The source will be released as if src.release() were called. // A moved-from Cleanup can be safely destroyed or reassigned. template <typename G> Cleanup(Cleanup<G>&& src) // NOLINT : released_(src.is_released()), f_(src.release()) { } // Assignment to a Cleanup object behaves like destroying it // and making a new one in its place, analogous to unique_ptr // semantics. Cleanup& operator=(Cleanup&& src) { // NOLINT if (!released_) std::move(f_)(); released_ = src.released_; f_ = src.release(); return *this; } ~Cleanup() { if (!released_) std::move(f_)(); } // Releases the cleanup function instead of running it. // Hint: use c.release()() to run early. F release() { released_ = true; return std::move(f_); } bool is_released() const { return released_; } private: static_assert(!std::is_reference<F>::value, "F must not be a reference"); bool released_ = false; F f_; }; // MakeCleanup(f) returns an RAII cleanup object that calls 'f' in its // destructor. The easiest way to use MakeCleanup is with a lambda argument, // capturing the return value in an 'auto' local variable. Most users will not // need more sophisticated syntax than that. // // Example: // void func() { // FILE* fp = fopen("data.txt", "r"); // if (fp == nullptr) return; // auto fp_cleaner = gtl::MakeCleanup([fp] { fclose(fp); }); // // No matter what, fclose(fp) will happen. // DataObject d; // while (ReadDataObject(fp, &d)) { // if (d.IsBad()) { // LOG(ERROR) << "Bad Data"; // return; // } // PushGoodData(d); // } // } // // You can use Cleanup<F> directly, instead of using MakeCleanup and auto, // but there's rarely a reason to do that. // // You can call 'release()' on a Cleanup object to cancel the cleanup. template <int&... ExplicitParameterBarrier, typename F, typename DecayF = typename std::decay<F>::type> Cleanup<DecayF> MakeCleanup(F&& f) { return Cleanup<DecayF>(std::forward<F>(f)); } #endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_CLEANUP_H_
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/projects/pointnet/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PointNet module."""
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PointNet module."""
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/rendering/opengl/egl_util.h
/* Copyright 2020 The TensorFlow Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_UTIL_H_ #define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_UTIL_H_ #include <EGL/egl.h> #ifdef __cplusplus extern "C" { #endif // Creates and initializes an EGL display at the specified device_index. Unlike // the standard eglGetDisplay(), this function takes a device_index, iterates // through all the available devices on the machine using EGL extensions, and // returns the Nth successfully initialized EGLDisplay. This allows us to get a // valid EGL display on multi-GPU machines, where we limit access to a sub-set // of the available GPU devices. Returns an initialized EGLDisplay or // EGL_NO_DISPLAY on error. EGLDisplay CreateInitializedEGLDisplayAtIndex(int device_index); // Helper function to create EGL display at device index 0. EGLDisplay CreateInitializedEGLDisplay(void); // Helper function to only call eglTerminate() once all instances created from // CreateInitializedEGLDisplay() have been terminated. This is necessary because // calling eglTerminate will invalidate *all* contexts associated with a given // display within the same address space. EGLBoolean TerminateInitializedEGLDisplay(EGLDisplay display); // Helper function that unloads any remaining resources used for internal // bookkeeping. Ordinary user code generally should not need to call this, // but it is useful when, say, using this code as part of a DSO that is // loaded and unloaded repeatedly. This function must not be called more // than once per process (or DSO load). It should generally be called just // before exit. void ShutDownEGLSubsystem(void); #ifdef __cplusplus } // extern "C" #endif #endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_UTIL_H_
/* Copyright 2020 The TensorFlow Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_UTIL_H_ #define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_UTIL_H_ #include <EGL/egl.h> #ifdef __cplusplus extern "C" { #endif // Creates and initializes an EGL display at the specified device_index. Unlike // the standard eglGetDisplay(), this function takes a device_index, iterates // through all the available devices on the machine using EGL extensions, and // returns the Nth successfully initialized EGLDisplay. This allows us to get a // valid EGL display on multi-GPU machines, where we limit access to a sub-set // of the available GPU devices. Returns an initialized EGLDisplay or // EGL_NO_DISPLAY on error. EGLDisplay CreateInitializedEGLDisplayAtIndex(int device_index); // Helper function to create EGL display at device index 0. EGLDisplay CreateInitializedEGLDisplay(void); // Helper function to only call eglTerminate() once all instances created from // CreateInitializedEGLDisplay() have been terminated. This is necessary because // calling eglTerminate will invalidate *all* contexts associated with a given // display within the same address space. EGLBoolean TerminateInitializedEGLDisplay(EGLDisplay display); // Helper function that unloads any remaining resources used for internal // bookkeeping. Ordinary user code generally should not need to call this, // but it is useful when, say, using this code as part of a DSO that is // loaded and unloaded repeatedly. This function must not be called more // than once per process (or DSO load). It should generally be called just // before exit. void ShutDownEGLSubsystem(void); #ifdef __cplusplus } // extern "C" #endif #endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_UTIL_H_
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/geometry/transformation/axis_angle.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""This module implements axis-angle functionalities. The axis-angle representation is defined as $$\theta\mathbf{a}$$, where $$\mathbf{a}$$ is a unit vector indicating the direction of rotation and $$\theta$$ is a scalar controlling the angle of rotation. It is important to note that the axis-angle does not perform rotation by itself, but that it can be used to rotate any given vector $$\mathbf{v} \in {\mathbb{R}^3}$$ into a vector $$\mathbf{v}'$$ using the Rodrigues' rotation formula: $$\mathbf{v}'=\mathbf{v}\cos(\theta)+(\mathbf{a}\times\mathbf{v})\sin(\theta) +\mathbf{a}(\mathbf{a}\cdot\mathbf{v})(1-\cos(\theta)).$$ More details about the axis-angle formalism can be found on [this page.] (https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation) Note: Some of the functions defined in the module expect a normalized axis $$\mathbf{a} = [x, y, z]^T$$ as inputs where $$x^2 + y^2 + z^2 = 1$$. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.geometry.transformation import quaternion as quaternion_lib from tensorflow_graphics.geometry.transformation import rotation_matrix_3d from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape def from_euler(angles, name="axis_angle_from_euler"): r"""Converts Euler angles to an axis-angle representation. Note: The conversion is performed by first converting to a quaternion representation, and then by converting the quaternion to an axis-angle. Note: In the following, A1 to An are optional batch dimensions. Args: angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the three Euler angles. `[A1, ..., An, 0]` is the angle about `x` in radians `[A1, ..., An, 1]` is the angle about `y` in radians and `[A1, ..., An, 2]` is the angle about `z` in radians. name: A name for this op that defaults to "axis_angle_from_euler". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. """ with tf.name_scope(name): quaternion = quaternion_lib.from_euler(angles) return from_quaternion(quaternion) def from_euler_with_small_angles_approximation( angles, name="axis_angle_from_euler_with_small_angles_approximation"): r"""Converts small Euler angles to an axis-angle representation. Under the small angle assumption, $$\sin(x)$$ and $$\cos(x)$$ can be approximated by their second order Taylor expansions, where $$\sin(x) \approx x$$ and $$\cos(x) \approx 1 - \frac{x^2}{2}$$. In the current implementation, the smallness of the angles is not verified. 
Note: The conversion is performed by first converting to a quaternion representation, and then by converting the quaternion to an axis-angle. Note: In the following, A1 to An are optional batch dimensions. Args: angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the three small Euler angles. `[A1, ..., An, 0]` is the angle about `x` in radians `[A1, ..., An, 1]` is the angle about `y` in radians and `[A1, ..., An, 2]` is the angle about `z` in radians. name: A name for this op that defaults to "axis_angle_from_euler_with_small_angles_approximation". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. """ with tf.name_scope(name): quaternion = quaternion_lib.from_euler_with_small_angles_approximation( angles) return from_quaternion(quaternion) def from_quaternion(quaternion, name="axis_angle_from_quaternion"): """Converts a quaternion to an axis-angle representation. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. name: A name for this op that defaults to "axis_angle_from_quaternion". Returns: Tuple of two tensors of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. Raises: ValueError: If the shape of `quaternion` is not supported. """ with tf.name_scope(name): quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) quaternion = asserts.assert_normalized(quaternion) # This prevents zero norm xyz and zero w, and is differentiable. quaternion += asserts.select_eps_for_addition(quaternion.dtype) xyz, w = tf.split(quaternion, (3, 1), axis=-1) norm = tf.norm(tensor=xyz, axis=-1, keepdims=True) angle = 2.0 * tf.atan2(norm, tf.abs(w)) axis = safe_ops.safe_unsigned_div(safe_ops.nonzero_sign(w) * xyz, norm) return axis, angle def from_rotation_matrix(rotation_matrix, name="axis_angle_from_rotation_matrix"): """Converts a rotation matrix to an axis-angle representation. Note: In the current version the returned axis-angle representation is not unique for a given rotation matrix. Since a direct conversion would not really be faster, we first transform the rotation matrix to a quaternion, and finally perform the conversion from that quaternion to the corresponding axis-angle representation. Note: In the following, A1 to An are optional batch dimensions. Args: rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the last two dimensions represent a rotation matrix. name: A name for this op that defaults to "axis_angle_from_rotation_matrix". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. Raises: ValueError: If the shape of `rotation_matrix` is not supported. 
""" with tf.name_scope(name): rotation_matrix = tf.convert_to_tensor(value=rotation_matrix) shape.check_static( tensor=rotation_matrix, tensor_name="rotation_matrix", has_rank_greater_than=1, has_dim_equals=((-2, 3), (-1, 3))) rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized( rotation_matrix) quaternion = quaternion_lib.from_rotation_matrix(rotation_matrix) return from_quaternion(quaternion) def inverse(axis, angle, name="axis_angle_inverse"): """Computes the axis-angle that is the inverse of the input axis-angle. Note: In the following, A1 to An are optional batch dimensions. Args: axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension represents an angle. name: A name for this op that defaults to "axis_angle_inverse". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. Raises: ValueError: If the shape of `axis` or `angle` is not supported. """ with tf.name_scope(name): axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(axis, angle), tensor_names=("axis", "angle"), last_axes=-2, broadcast_compatible=True) axis = asserts.assert_normalized(axis) return axis, -angle def is_normalized(axis, angle, atol=1e-3, name="axis_angle_is_normalized"): """Determines if the axis-angle is normalized or not. Note: In the following, A1 to An are optional batch dimensions. Args: axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension represents an angle. atol: The absolute tolerance parameter. name: A name for this op that defaults to "axis_angle_is_normalized". Returns: A tensor of shape `[A1, ..., An, 1]`, where False indicates that the axis is not normalized. """ with tf.name_scope(name): axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(axis, angle), tensor_names=("axis", "angle"), last_axes=-2, broadcast_compatible=True) norms = tf.norm(tensor=axis, axis=-1, keepdims=True) return tf.abs(norms - 1.) < atol def rotate(point, axis, angle, name="axis_angle_rotate"): r"""Rotates a 3d point using an axis-angle by applying the Rodrigues' formula. Rotates a vector $$\mathbf{v} \in {\mathbb{R}^3}$$ into a vector $$\mathbf{v}' \in {\mathbb{R}^3}$$ using the Rodrigues' rotation formula: $$\mathbf{v}'=\mathbf{v}\cos(\theta)+(\mathbf{a}\times\mathbf{v})\sin(\theta) +\mathbf{a}(\mathbf{a}\cdot\mathbf{v})(1-\cos(\theta)).$$ Note: In the following, A1 to An are optional batch dimensions. Args: point: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a 3d point to rotate. axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension represents an angle. name: A name for this op that defaults to "axis_angle_rotate". 
Returns: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a 3d point. Raises: ValueError: If `point`, `axis`, or `angle` are of different shape or if their respective shape is not supported. """ with tf.name_scope(name): point = tf.convert_to_tensor(value=point) axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static( tensor=point, tensor_name="point", has_dim_equals=(-1, 3)) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(point, axis, angle), tensor_names=("point", "axis", "angle"), last_axes=-2, broadcast_compatible=True) axis = asserts.assert_normalized(axis) cos_angle = tf.cos(angle) axis_dot_point = vector.dot(axis, point) return point * cos_angle + vector.cross( axis, point) * tf.sin(angle) + axis * axis_dot_point * (1.0 - cos_angle) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""This module implements axis-angle functionalities. The axis-angle representation is defined as $$\theta\mathbf{a}$$, where $$\mathbf{a}$$ is a unit vector indicating the direction of rotation and $$\theta$$ is a scalar controlling the angle of rotation. It is important to note that the axis-angle does not perform rotation by itself, but that it can be used to rotate any given vector $$\mathbf{v} \in {\mathbb{R}^3}$$ into a vector $$\mathbf{v}'$$ using the Rodrigues' rotation formula: $$\mathbf{v}'=\mathbf{v}\cos(\theta)+(\mathbf{a}\times\mathbf{v})\sin(\theta) +\mathbf{a}(\mathbf{a}\cdot\mathbf{v})(1-\cos(\theta)).$$ More details about the axis-angle formalism can be found on [this page.] (https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation) Note: Some of the functions defined in the module expect a normalized axis $$\mathbf{a} = [x, y, z]^T$$ as inputs where $$x^2 + y^2 + z^2 = 1$$. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.geometry.transformation import quaternion as quaternion_lib from tensorflow_graphics.geometry.transformation import rotation_matrix_3d from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape def from_euler(angles, name="axis_angle_from_euler"): r"""Converts Euler angles to an axis-angle representation. Note: The conversion is performed by first converting to a quaternion representation, and then by converting the quaternion to an axis-angle. Note: In the following, A1 to An are optional batch dimensions. Args: angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the three Euler angles. `[A1, ..., An, 0]` is the angle about `x` in radians `[A1, ..., An, 1]` is the angle about `y` in radians and `[A1, ..., An, 2]` is the angle about `z` in radians. name: A name for this op that defaults to "axis_angle_from_euler". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. """ with tf.name_scope(name): quaternion = quaternion_lib.from_euler(angles) return from_quaternion(quaternion) def from_euler_with_small_angles_approximation( angles, name="axis_angle_from_euler_with_small_angles_approximation"): r"""Converts small Euler angles to an axis-angle representation. Under the small angle assumption, $$\sin(x)$$ and $$\cos(x)$$ can be approximated by their second order Taylor expansions, where $$\sin(x) \approx x$$ and $$\cos(x) \approx 1 - \frac{x^2}{2}$$. In the current implementation, the smallness of the angles is not verified. 
Note: The conversion is performed by first converting to a quaternion representation, and then by converting the quaternion to an axis-angle. Note: In the following, A1 to An are optional batch dimensions. Args: angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the three small Euler angles. `[A1, ..., An, 0]` is the angle about `x` in radians `[A1, ..., An, 1]` is the angle about `y` in radians and `[A1, ..., An, 2]` is the angle about `z` in radians. name: A name for this op that defaults to "axis_angle_from_euler_with_small_angles_approximation". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. """ with tf.name_scope(name): quaternion = quaternion_lib.from_euler_with_small_angles_approximation( angles) return from_quaternion(quaternion) def from_quaternion(quaternion, name="axis_angle_from_quaternion"): """Converts a quaternion to an axis-angle representation. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. name: A name for this op that defaults to "axis_angle_from_quaternion". Returns: Tuple of two tensors of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. Raises: ValueError: If the shape of `quaternion` is not supported. """ with tf.name_scope(name): quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) quaternion = asserts.assert_normalized(quaternion) # This prevents zero norm xyz and zero w, and is differentiable. quaternion += asserts.select_eps_for_addition(quaternion.dtype) xyz, w = tf.split(quaternion, (3, 1), axis=-1) norm = tf.norm(tensor=xyz, axis=-1, keepdims=True) angle = 2.0 * tf.atan2(norm, tf.abs(w)) axis = safe_ops.safe_unsigned_div(safe_ops.nonzero_sign(w) * xyz, norm) return axis, angle def from_rotation_matrix(rotation_matrix, name="axis_angle_from_rotation_matrix"): """Converts a rotation matrix to an axis-angle representation. Note: In the current version the returned axis-angle representation is not unique for a given rotation matrix. Since a direct conversion would not really be faster, we first transform the rotation matrix to a quaternion, and finally perform the conversion from that quaternion to the corresponding axis-angle representation. Note: In the following, A1 to An are optional batch dimensions. Args: rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the last two dimensions represent a rotation matrix. name: A name for this op that defaults to "axis_angle_from_rotation_matrix". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. Raises: ValueError: If the shape of `rotation_matrix` is not supported. 
""" with tf.name_scope(name): rotation_matrix = tf.convert_to_tensor(value=rotation_matrix) shape.check_static( tensor=rotation_matrix, tensor_name="rotation_matrix", has_rank_greater_than=1, has_dim_equals=((-2, 3), (-1, 3))) rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized( rotation_matrix) quaternion = quaternion_lib.from_rotation_matrix(rotation_matrix) return from_quaternion(quaternion) def inverse(axis, angle, name="axis_angle_inverse"): """Computes the axis-angle that is the inverse of the input axis-angle. Note: In the following, A1 to An are optional batch dimensions. Args: axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension represents an angle. name: A name for this op that defaults to "axis_angle_inverse". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. Raises: ValueError: If the shape of `axis` or `angle` is not supported. """ with tf.name_scope(name): axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(axis, angle), tensor_names=("axis", "angle"), last_axes=-2, broadcast_compatible=True) axis = asserts.assert_normalized(axis) return axis, -angle def is_normalized(axis, angle, atol=1e-3, name="axis_angle_is_normalized"): """Determines if the axis-angle is normalized or not. Note: In the following, A1 to An are optional batch dimensions. Args: axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension represents an angle. atol: The absolute tolerance parameter. name: A name for this op that defaults to "axis_angle_is_normalized". Returns: A tensor of shape `[A1, ..., An, 1]`, where False indicates that the axis is not normalized. """ with tf.name_scope(name): axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(axis, angle), tensor_names=("axis", "angle"), last_axes=-2, broadcast_compatible=True) norms = tf.norm(tensor=axis, axis=-1, keepdims=True) return tf.abs(norms - 1.) < atol def rotate(point, axis, angle, name="axis_angle_rotate"): r"""Rotates a 3d point using an axis-angle by applying the Rodrigues' formula. Rotates a vector $$\mathbf{v} \in {\mathbb{R}^3}$$ into a vector $$\mathbf{v}' \in {\mathbb{R}^3}$$ using the Rodrigues' rotation formula: $$\mathbf{v}'=\mathbf{v}\cos(\theta)+(\mathbf{a}\times\mathbf{v})\sin(\theta) +\mathbf{a}(\mathbf{a}\cdot\mathbf{v})(1-\cos(\theta)).$$ Note: In the following, A1 to An are optional batch dimensions. Args: point: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a 3d point to rotate. axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension represents an angle. name: A name for this op that defaults to "axis_angle_rotate". 
Returns: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a 3d point. Raises: ValueError: If `point`, `axis`, or `angle` are of different shape or if their respective shape is not supported. """ with tf.name_scope(name): point = tf.convert_to_tensor(value=point) axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static( tensor=point, tensor_name="point", has_dim_equals=(-1, 3)) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(point, axis, angle), tensor_names=("point", "axis", "angle"), last_axes=-2, broadcast_compatible=True) axis = asserts.assert_normalized(axis) cos_angle = tf.cos(angle) axis_dot_point = vector.dot(axis, point) return point * cos_angle + vector.cross( axis, point) * tf.sin(angle) + axis * axis_dot_point * (1.0 - cos_angle) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
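For a quick sanity check of the module above, here is a minimal usage sketch: it converts Euler angles to an axis-angle pair with `from_euler` and rotates a point with `rotate`. The import path `tensorflow_graphics.geometry.transformation.axis_angle` is an assumption inferred from the sibling imports in the file, not stated in it.

```python
# Minimal usage sketch; the import path below is assumed, not confirmed by
# the file itself.
import numpy as np
import tensorflow as tf

from tensorflow_graphics.geometry.transformation import axis_angle

# A rotation of pi/2 about the z axis, given as Euler angles.
angles = tf.constant([0.0, 0.0, np.pi / 2.0])
axis, angle = axis_angle.from_euler(angles)  # axis ~[0, 0, 1], angle ~[pi/2]

# Rotating the x unit vector about z by pi/2 yields the y unit vector.
point = tf.constant([1.0, 0.0, 0.0])
print(axis_angle.rotate(point, axis, angle))  # ~[0., 1., 0.]
```

Rodrigues' formula from the module docstring gives the same result by hand: with cos(pi/2) = 0 and the axis orthogonal to the point, the first and third terms vanish, leaving a x v = [0, 1, 0].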
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/projects/local_implicit_grid/README.md
## Local Implicit Grid Representations for 3D Scenes

By: [Chiyu "Max" Jiang](http://maxjiang.ml/), [Avneesh
Sud](https://research.google/people/105052/), [Ameesh
Makadia](http://www.ameeshmakadia.com/index.html), [Jingwei
Huang](http://stanford.edu/~jingweih/), [Matthias
Niessner](http://niessnerlab.org/members/matthias_niessner/profile.html),
[Thomas Funkhouser](https://www.cs.princeton.edu/~funk/)

\[[Project Website](http://maxjiang.ml/proj/lig)\] \[[Paper PDF
Preprint](https://arxiv.org/abs/2003.08981)\]

![teaser](https://storage.googleapis.com/local-implicit-grids/lig_teaser.gif)

### Introduction

This repository is based on our CVPR 2020 paper: [Local Implicit Grid
Representations for 3D Scenes](https://arxiv.org/abs/2003.08981). The
[project webpage](http://maxjiang.ml/proj/lig) presents an overview of the
project.

Shape priors learned from data are commonly used to reconstruct 3D objects
from partial or noisy data. Yet no such shape priors are available for indoor
scenes, since typical 3D autoencoders cannot handle their scale, complexity,
or diversity. In this paper, we introduce Local Implicit Grid
Representations, a new 3D shape representation designed for scalability and
generality. The motivating idea is that most 3D surfaces share geometric
details at some scale -- i.e., at a scale smaller than an entire object and
larger than a small patch. We train an autoencoder to learn an embedding of
local crops of 3D shapes at that size. Then, we use the decoder as a
component in a shape optimization that solves for a set of latent codes on a
regular grid of overlapping crops such that an interpolation of the decoded
local shapes matches a partial or noisy observation. We demonstrate the value
of this proposed approach for 3D surface reconstruction from sparse point
observations, showing significantly better results than alternative
approaches.

Our deep learning code base is written using
[Tensorflow](https://www.tensorflow.org/).

### Getting started

Code is tested with Python 3.7+ and TensorFlow 1.14+. Please install the
necessary dependencies; `pip` is the recommended way to do this.

```bash
pip install -r requirements.txt
```

### Scene reconstruction using pretrained part encoding

Currently we are releasing the evaluation code to use our pretrained model
for scene reconstruction, along with definitions for the local implicit grid
layer and part-autoencoder model.

To directly use our script for surface reconstruction, prepare the input
point cloud as a `.ply` file with vertex attributes: `x, y, z, nx, ny, nz`.
See `resample_geometry.py` for creating an input `.ply` file from a mesh. For
demo input data, refer to the inputs under `demo_data/`.

To reconstruct a meshed surface given an input point cloud, run
`reconstruct_geometry.py` as follows:

```bash
# Be sure to add the root of the tensorflow_graphics directory to your PYTHONPATH
# Assuming PWD=<path/to/tensorflow_graphics>
export PYTHONPATH="$PWD:$PYTHONPATH"
pushd tensorflow_graphics/projects/local_implicit_grid/

# using one GPU is sufficient
export CUDA_VISIBLE_DEVICES=0

# download the model weights.
wget https://storage.googleapis.com/local-implicit-grids/pretrained_ckpt.zip
unzip pretrained_ckpt.zip; rm pretrained_ckpt.zip

# fetch a test object and compute point cloud.
mkdir -p demo_data
wget https://cs.uwaterloo.ca/~c2batty/bunny_watertight.obj
mv bunny_watertight.obj demo_data

# reconstruct an object. since objects are much smaller than entire scenes,
# we can use a smaller point number and number of optimization steps to speed
# up.
python reconstruct_geometry.py \ --input_ply demo_data/bunny.ply \ --part_size=0.20 --npoints=2048 --steps=3001 # download more demo data for scene reconstruction. wget http://storage.googleapis.com/local-implicit-grids/demo_data.zip unzip demo_data.zip; rm demo_data.zip # reconstruct a dense scene python reconstruct_geometry.py \ --input_ply demo_data/living_room_33_1000_per_m2.ply \ --part_size=0.25 # reconstruct a sparser scene using a larger part size python reconstruct_geometry.py \ --input_ply demo_data/living_room_33_100_per_m2.ply \ --part_size=0.50 ``` The part size parameter controls the granularity of the local implicit grid. For scenes it should be in the range of 0.25 - 0.5 (meters). For objects, it depends on the scale of the coordinates. Generally for normalized objects (max bounding box length ~ 1) use a part size of ~0.2. Generally `part_size` should not be greater than 1/4 of the minimum bounding box width. ### References If you find our code or paper useful, please consider citing @inproceedings{Local_Implicit_Grid_CVPR20, title = {Local Implicit Grid Representations for 3D Scenes}, author = {Chiyu Max Jiang and Avneesh Sud and Ameesh Makadia and Jingwei Huang and Matthias Nießner and Thomas Funkhouser}, booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)}, year = {2020} } ### Contact Please contact [Max Jiang](mailto:maxjiang93@gmail.com) or [Avneesh Sud](mailto:avneesh@google.com) if you have further questions!
## Local Implicit Grid Representations for 3D Scenes

By: [Chiyu "Max" Jiang](http://maxjiang.ml/), [Avneesh
Sud](https://research.google/people/105052/), [Ameesh
Makadia](http://www.ameeshmakadia.com/index.html), [Jingwei
Huang](http://stanford.edu/~jingweih/), [Matthias
Niessner](http://niessnerlab.org/members/matthias_niessner/profile.html),
[Thomas Funkhouser](https://www.cs.princeton.edu/~funk/)

\[[Project Website](http://maxjiang.ml/proj/lig)\] \[[Paper PDF
Preprint](https://arxiv.org/abs/2003.08981)\]

![teaser](https://storage.googleapis.com/local-implicit-grids/lig_teaser.gif)

### Introduction

This repository is based on our CVPR 2020 paper: [Local Implicit Grid
Representations for 3D Scenes](https://arxiv.org/abs/2003.08981). The
[project webpage](http://maxjiang.ml/proj/lig) presents an overview of the
project.

Shape priors learned from data are commonly used to reconstruct 3D objects
from partial or noisy data. Yet no such shape priors are available for indoor
scenes, since typical 3D autoencoders cannot handle their scale, complexity,
or diversity. In this paper, we introduce Local Implicit Grid
Representations, a new 3D shape representation designed for scalability and
generality. The motivating idea is that most 3D surfaces share geometric
details at some scale -- i.e., at a scale smaller than an entire object and
larger than a small patch. We train an autoencoder to learn an embedding of
local crops of 3D shapes at that size. Then, we use the decoder as a
component in a shape optimization that solves for a set of latent codes on a
regular grid of overlapping crops such that an interpolation of the decoded
local shapes matches a partial or noisy observation. We demonstrate the value
of this proposed approach for 3D surface reconstruction from sparse point
observations, showing significantly better results than alternative
approaches.

Our deep learning code base is written using
[Tensorflow](https://www.tensorflow.org/).

### Getting started

Code is tested with Python 3.7+ and TensorFlow 1.14+. Please install the
necessary dependencies; `pip` is the recommended way to do this.

```bash
pip install -r requirements.txt
```

### Scene reconstruction using pretrained part encoding

Currently we are releasing the evaluation code to use our pretrained model
for scene reconstruction, along with definitions for the local implicit grid
layer and part-autoencoder model.

To directly use our script for surface reconstruction, prepare the input
point cloud as a `.ply` file with vertex attributes: `x, y, z, nx, ny, nz`.
See `resample_geometry.py` for creating an input `.ply` file from a mesh. For
demo input data, refer to the inputs under `demo_data/`.

To reconstruct a meshed surface given an input point cloud, run
`reconstruct_geometry.py` as follows:

```bash
# Be sure to add the root of the tensorflow_graphics directory to your PYTHONPATH
# Assuming PWD=<path/to/tensorflow_graphics>
export PYTHONPATH="$PWD:$PYTHONPATH"
pushd tensorflow_graphics/projects/local_implicit_grid/

# using one GPU is sufficient
export CUDA_VISIBLE_DEVICES=0

# download the model weights.
wget https://storage.googleapis.com/local-implicit-grids/pretrained_ckpt.zip
unzip pretrained_ckpt.zip; rm pretrained_ckpt.zip

# fetch a test object and compute point cloud.
mkdir -p demo_data
wget https://cs.uwaterloo.ca/~c2batty/bunny_watertight.obj
mv bunny_watertight.obj demo_data

# reconstruct an object. since objects are much smaller than entire scenes,
# we can use a smaller point number and number of optimization steps to speed
# up.
python reconstruct_geometry.py \ --input_ply demo_data/bunny.ply \ --part_size=0.20 --npoints=2048 --steps=3001 # download more demo data for scene reconstruction. wget http://storage.googleapis.com/local-implicit-grids/demo_data.zip unzip demo_data.zip; rm demo_data.zip # reconstruct a dense scene python reconstruct_geometry.py \ --input_ply demo_data/living_room_33_1000_per_m2.ply \ --part_size=0.25 # reconstruct a sparser scene using a larger part size python reconstruct_geometry.py \ --input_ply demo_data/living_room_33_100_per_m2.ply \ --part_size=0.50 ``` The part size parameter controls the granularity of the local implicit grid. For scenes it should be in the range of 0.25 - 0.5 (meters). For objects, it depends on the scale of the coordinates. Generally for normalized objects (max bounding box length ~ 1) use a part size of ~0.2. Generally `part_size` should not be greater than 1/4 of the minimum bounding box width. ### References If you find our code or paper useful, please consider citing @inproceedings{Local_Implicit_Grid_CVPR20, title = {Local Implicit Grid Representations for 3D Scenes}, author = {Chiyu Max Jiang and Avneesh Sud and Ameesh Makadia and Jingwei Huang and Matthias Nießner and Thomas Funkhouser}, booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)}, year = {2020} } ### Contact Please contact [Max Jiang](mailto:maxjiang93@gmail.com) or [Avneesh Sud](mailto:avneesh@google.com) if you have further questions!
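The part-size guidance above can be condensed into a small helper. The following is a minimal sketch, assuming NumPy only; `suggest_part_size` and its 0.25 m default are hypothetical names and values chosen to match the README's rules (0.25 to 0.5 m for scenes, never more than 1/4 of the smallest bounding-box extent), not part of the released code.

```python
# Illustrative helper, not part of the released code: picks a part_size
# following the README guidance (never more than 1/4 of the smallest
# bounding-box extent, defaulting to the low end of the scene range).
import numpy as np

def suggest_part_size(points, preferred=0.25):  # `preferred` is an assumption
  """Suggests a part_size for a point cloud of shape [N, 3]."""
  extents = points.max(axis=0) - points.min(axis=0)
  upper_bound = extents.min() / 4.0  # README: at most 1/4 of min bbox width
  return min(preferred, upper_bound)

# Example: a unit-cube point cloud yields a value close to 0.25.
points = np.random.rand(1000, 3).astype(np.float32)
print(suggest_part_size(points))
```

For normalized objects one would pass `preferred=0.2`, per the guidance above.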
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/nn/layer/__init__.py
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer module."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_graphics.nn.layer import graph_convolution
from tensorflow_graphics.nn.layer import pointnet
from tensorflow_graphics.util import export_api as _export_api

# API contains submodules of tensorflow_graphics.nn.layer.
__all__ = _export_api.get_modules()
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer module."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_graphics.nn.layer import graph_convolution
from tensorflow_graphics.nn.layer import pointnet
from tensorflow_graphics.util import export_api as _export_api

# API contains submodules of tensorflow_graphics.nn.layer.
__all__ = _export_api.get_modules()
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/math/interpolation/tests/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./.git/hooks/pre-rebase.sample
#!/bin/sh # # Copyright (c) 2006, 2008 Junio C Hamano # # The "pre-rebase" hook is run just before "git rebase" starts doing # its job, and can prevent the command from running by exiting with # non-zero status. # # The hook is called with the following parameters: # # $1 -- the upstream the series was forked from. # $2 -- the branch being rebased (or empty when rebasing the current branch). # # This sample shows how to prevent topic branches that are already # merged to 'next' branch from getting rebased, because allowing it # would result in rebasing already published history. publish=next basebranch="$1" if test "$#" = 2 then topic="refs/heads/$2" else topic=`git symbolic-ref HEAD` || exit 0 ;# we do not interrupt rebasing detached HEAD fi case "$topic" in refs/heads/??/*) ;; *) exit 0 ;# we do not interrupt others. ;; esac # Now we are dealing with a topic branch being rebased # on top of master. Is it OK to rebase it? # Does the topic really exist? git show-ref -q "$topic" || { echo >&2 "No such branch $topic" exit 1 } # Is topic fully merged to master? not_in_master=`git rev-list --pretty=oneline ^master "$topic"` if test -z "$not_in_master" then echo >&2 "$topic is fully merged to master; better remove it." exit 1 ;# we could allow it, but there is no point. fi # Is topic ever merged to next? If so you should not be rebasing it. only_next_1=`git rev-list ^master "^$topic" ${publish} | sort` only_next_2=`git rev-list ^master ${publish} | sort` if test "$only_next_1" = "$only_next_2" then not_in_topic=`git rev-list "^$topic" master` if test -z "$not_in_topic" then echo >&2 "$topic is already up to date with master" exit 1 ;# we could allow it, but there is no point. else exit 0 fi else not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"` /usr/bin/perl -e ' my $topic = $ARGV[0]; my $msg = "* $topic has commits already merged to public branch:\n"; my (%not_in_next) = map { /^([0-9a-f]+) /; ($1 => 1); } split(/\n/, $ARGV[1]); for my $elem (map { /^([0-9a-f]+) (.*)$/; [$1 => $2]; } split(/\n/, $ARGV[2])) { if (!exists $not_in_next{$elem->[0]}) { if ($msg) { print STDERR $msg; undef $msg; } print STDERR " $elem->[1]\n"; } } ' "$topic" "$not_in_next" "$not_in_master" exit 1 fi <<\DOC_END This sample hook safeguards topic branches that have been published from being rewound. The workflow assumed here is: * Once a topic branch forks from "master", "master" is never merged into it again (either directly or indirectly). * Once a topic branch is fully cooked and merged into "master", it is deleted. If you need to build on top of it to correct earlier mistakes, a new topic branch is created by forking at the tip of the "master". This is not strictly necessary, but it makes it easier to keep your history simple. * Whenever you need to test or publish your changes to topic branches, merge them into "next" branch. The script, being an example, hardcodes the publish branch name to be "next", but it is trivial to make it configurable via $GIT_DIR/config mechanism. With this workflow, you would want to know: (1) ... if a topic branch has ever been merged to "next". Young topic branches can have stupid mistakes you would rather clean up before publishing, and things that have not been merged into other branches can be easily rebased without affecting other people. But once it is published, you would not want to rewind it. (2) ... if a topic branch has been fully merged to "master". Then you can delete it. 
More importantly, you should not build on top of it -- other people may already want to change things related to the topic as patches against your "master", so if you need further changes, it is better to fork the topic (perhaps with the same name) afresh from the tip of "master". Let's look at this example: o---o---o---o---o---o---o---o---o---o "next" / / / / / a---a---b A / / / / / / / / c---c---c---c B / / / / \ / / / / b---b C \ / / / / / \ / ---o---o---o---o---o---o---o---o---o---o---o "master" A, B and C are topic branches. * A has one fix since it was merged up to "next". * B has finished. It has been fully merged up to "master" and "next", and is ready to be deleted. * C has not merged to "next" at all. We would want to allow C to be rebased, refuse A, and encourage B to be deleted. To compute (1): git rev-list ^master ^topic next git rev-list ^master next if these match, topic has not merged in next at all. To compute (2): git rev-list master..topic if this is empty, it is fully merged to "master". DOC_END
#!/bin/sh # # Copyright (c) 2006, 2008 Junio C Hamano # # The "pre-rebase" hook is run just before "git rebase" starts doing # its job, and can prevent the command from running by exiting with # non-zero status. # # The hook is called with the following parameters: # # $1 -- the upstream the series was forked from. # $2 -- the branch being rebased (or empty when rebasing the current branch). # # This sample shows how to prevent topic branches that are already # merged to 'next' branch from getting rebased, because allowing it # would result in rebasing already published history. publish=next basebranch="$1" if test "$#" = 2 then topic="refs/heads/$2" else topic=`git symbolic-ref HEAD` || exit 0 ;# we do not interrupt rebasing detached HEAD fi case "$topic" in refs/heads/??/*) ;; *) exit 0 ;# we do not interrupt others. ;; esac # Now we are dealing with a topic branch being rebased # on top of master. Is it OK to rebase it? # Does the topic really exist? git show-ref -q "$topic" || { echo >&2 "No such branch $topic" exit 1 } # Is topic fully merged to master? not_in_master=`git rev-list --pretty=oneline ^master "$topic"` if test -z "$not_in_master" then echo >&2 "$topic is fully merged to master; better remove it." exit 1 ;# we could allow it, but there is no point. fi # Is topic ever merged to next? If so you should not be rebasing it. only_next_1=`git rev-list ^master "^$topic" ${publish} | sort` only_next_2=`git rev-list ^master ${publish} | sort` if test "$only_next_1" = "$only_next_2" then not_in_topic=`git rev-list "^$topic" master` if test -z "$not_in_topic" then echo >&2 "$topic is already up to date with master" exit 1 ;# we could allow it, but there is no point. else exit 0 fi else not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"` /usr/bin/perl -e ' my $topic = $ARGV[0]; my $msg = "* $topic has commits already merged to public branch:\n"; my (%not_in_next) = map { /^([0-9a-f]+) /; ($1 => 1); } split(/\n/, $ARGV[1]); for my $elem (map { /^([0-9a-f]+) (.*)$/; [$1 => $2]; } split(/\n/, $ARGV[2])) { if (!exists $not_in_next{$elem->[0]}) { if ($msg) { print STDERR $msg; undef $msg; } print STDERR " $elem->[1]\n"; } } ' "$topic" "$not_in_next" "$not_in_master" exit 1 fi <<\DOC_END This sample hook safeguards topic branches that have been published from being rewound. The workflow assumed here is: * Once a topic branch forks from "master", "master" is never merged into it again (either directly or indirectly). * Once a topic branch is fully cooked and merged into "master", it is deleted. If you need to build on top of it to correct earlier mistakes, a new topic branch is created by forking at the tip of the "master". This is not strictly necessary, but it makes it easier to keep your history simple. * Whenever you need to test or publish your changes to topic branches, merge them into "next" branch. The script, being an example, hardcodes the publish branch name to be "next", but it is trivial to make it configurable via $GIT_DIR/config mechanism. With this workflow, you would want to know: (1) ... if a topic branch has ever been merged to "next". Young topic branches can have stupid mistakes you would rather clean up before publishing, and things that have not been merged into other branches can be easily rebased without affecting other people. But once it is published, you would not want to rewind it. (2) ... if a topic branch has been fully merged to "master". Then you can delete it. 
More importantly, you should not build on top of it -- other people may already want to change things related to the topic as patches against your "master", so if you need further changes, it is better to fork the topic (perhaps with the same name) afresh from the tip of "master". Let's look at this example: o---o---o---o---o---o---o---o---o---o "next" / / / / / a---a---b A / / / / / / / / c---c---c---c B / / / / \ / / / / b---b C \ / / / / / \ / ---o---o---o---o---o---o---o---o---o---o---o "master" A, B and C are topic branches. * A has one fix since it was merged up to "next". * B has finished. It has been fully merged up to "master" and "next", and is ready to be deleted. * C has not merged to "next" at all. We would want to allow C to be rebased, refuse A, and encourage B to be deleted. To compute (1): git rev-list ^master ^topic next git rev-list ^master next if these match, topic has not merged in next at all. To compute (2): git rev-list master..topic if this is empty, it is fully merged to "master". DOC_END
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/geometry/representation/tests/grid_test.py
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for grid."""

from absl.testing import parameterized
import numpy as np
import tensorflow as tf

from tensorflow_graphics.geometry.representation import grid
from tensorflow_graphics.util import test_case


class GridTest(test_case.TestCase):

  @parameterized.parameters(
      (((1,), (1,), (1,)), (tf.float32, tf.float32, tf.int32)),
      (((1, 1), (1, 1), (1,)), (tf.float32, tf.float32, tf.int32)),
  )
  def test_generate_exception_not_raised(self, shapes, dtypes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(grid.generate, shapes, dtypes)

  @parameterized.parameters(
      ("starts must have a rank greater than 0", (), (None,), (None,)),
      ("stops must have a rank greater than 0", (None,), (), (None,)),
      ("nums must have a rank of 1", (None,), (None,), ()),
      ("Not all batch dimensions are identical.", (1,), (0,), (1,)),
      ("Not all batch dimensions are identical.", (0,), (1,), (1,)),
      ("must have the same number of dimensions", (1,), (1,), (0,)),
  )
  def test_generate_exception_raised(self, error_msg, *shapes):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(grid.generate, error_msg, shapes)

  @parameterized.parameters(
      (((-1.,), (1.,), (3,)), (((-1.,), (0.,), (1.,)),)),
      ((((-1.,), (-1.,)), ((1.,), (1.,)), (1,)), ((((-1.,),), ((-1.,),)),)),
  )
  def test_generate_preset(self, test_inputs, test_outputs):
    """Tests the uniform grid generation using fixed test cases."""
    self.assert_output_is_correct(
        grid.generate, test_inputs, test_outputs, tile=False)

  def test_generate_random(self):
    """Tests the uniform grid generation."""
    starts = np.array((0., 0.), dtype=np.float32)
    stops = np.random.randint(1, 10, size=(2))
    nums = stops + 1
    stops = stops.astype(np.float32)

    g = grid.generate(starts, stops, nums)

    shape = nums.tolist() + [2]
    xv, yv = np.meshgrid(range(shape[0]), range(shape[1]), indexing="ij")
    gt = np.stack((xv, yv), axis=-1).astype(np.float32)
    self.assertAllClose(g, gt)


if __name__ == "__main__":
  test_case.main()
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for grid."""

from absl.testing import parameterized
import numpy as np
import tensorflow as tf

from tensorflow_graphics.geometry.representation import grid
from tensorflow_graphics.util import test_case


class GridTest(test_case.TestCase):

  @parameterized.parameters(
      (((1,), (1,), (1,)), (tf.float32, tf.float32, tf.int32)),
      (((1, 1), (1, 1), (1,)), (tf.float32, tf.float32, tf.int32)),
  )
  def test_generate_exception_not_raised(self, shapes, dtypes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(grid.generate, shapes, dtypes)

  @parameterized.parameters(
      ("starts must have a rank greater than 0", (), (None,), (None,)),
      ("stops must have a rank greater than 0", (None,), (), (None,)),
      ("nums must have a rank of 1", (None,), (None,), ()),
      ("Not all batch dimensions are identical.", (1,), (0,), (1,)),
      ("Not all batch dimensions are identical.", (0,), (1,), (1,)),
      ("must have the same number of dimensions", (1,), (1,), (0,)),
  )
  def test_generate_exception_raised(self, error_msg, *shapes):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(grid.generate, error_msg, shapes)

  @parameterized.parameters(
      (((-1.,), (1.,), (3,)), (((-1.,), (0.,), (1.,)),)),
      ((((-1.,), (-1.,)), ((1.,), (1.,)), (1,)), ((((-1.,),), ((-1.,),)),)),
  )
  def test_generate_preset(self, test_inputs, test_outputs):
    """Tests the uniform grid generation using fixed test cases."""
    self.assert_output_is_correct(
        grid.generate, test_inputs, test_outputs, tile=False)

  def test_generate_random(self):
    """Tests the uniform grid generation."""
    starts = np.array((0., 0.), dtype=np.float32)
    stops = np.random.randint(1, 10, size=(2))
    nums = stops + 1
    stops = stops.astype(np.float32)

    g = grid.generate(starts, stops, nums)

    shape = nums.tolist() + [2]
    xv, yv = np.meshgrid(range(shape[0]), range(shape[1]), indexing="ij")
    gt = np.stack((xv, yv), axis=-1).astype(np.float32)
    self.assertAllClose(g, gt)


if __name__ == "__main__":
  test_case.main()
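The preset case above doubles as a usage example. Here is a minimal sketch, assuming eager execution, that generates a uniform 1-D grid with `grid.generate` and reproduces the expected output of the first preset.

```python
# Minimal sketch of the op under test: three uniformly spaced samples
# between -1 and 1, matching the first preset case in the tests above.
import tensorflow as tf

from tensorflow_graphics.geometry.representation import grid

g = grid.generate((-1.,), (1.,), (3,))
print(g)  # [[-1.], [0.], [1.]], shape (3, 1)
```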
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/tensorboard/mesh_visualizer/tf_mesh_dashboard/array-buffer-data-provider.js
/* Copyright 2020 The TensorFlow Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

/**
 * @fileoverview ArrayBufferDataProvider is responsible for making requests to
 * the server, and for receiving and parsing the responses.
 */

// TODO(b/135959734): this class must be refactored into base DataProvider and
// subclass ArrayBufferDataProvider later.
var vz_mesh;
(function(vz_mesh) {

/**
 * Types of errors during network data roundtrip.
 * @enum {number}
 */
vz_mesh.ErrorCodes = {
  CANCELLED: 1  // Happens when the request was cancelled before it finished.
};

/**
 * Types of content displayed by the plugin.
 * @enum {number}
 */
const ContentType = {
  VERTEX: 1,
  FACE: 2,
  COLOR: 3
};

/**
 * Types of content displayed by the plugin mapped to underlying data types.
 * @enum {string}
 */
const ContentTypeToItemType = {
  VERTEX: 'float32',
  FACE: 'int32',
  COLOR: 'uint8'
};

class ArrayBufferDataProvider {

  /**
   * ArrayBufferDataProvider constructor, initializes everything needed for
   * future requests to the server.
   * @param {!Object} requestManager Request manager to communicate with the
   *  server.
   */
  constructor(requestManager) {
    this._requestManager = requestManager;
    this._canceller = new tf_backend.Canceller();
  }

  /**
   * Requests new data from the server.
   */
  reload(run, tag, sample) {
    this._canceller.cancelAll();
    return this._fetchMetadata(run, tag, sample);
  }

  /**
   * Requests new data of some particular type from the server.
   * @param {string} run Name of the run to get data for.
   * @param {string} tag Name of the tag to get data for.
   * @param {string} content_type Type of the content to retrieve.
   * @param {number} sample Sample index from a batch of data.
   * @param {number} step Step value, representing the point in time when the
   *  event occurred.
   * @param {!Object} meshData Map to populate with mesh data.
   * @return {!Object} Promise object representing server request.
   * @private
   */
  _fetchDataByStep(run, tag, content_type, sample, step, meshData) {
    const url = tf_backend.getRouter().pluginRoute(
        'mesh', '/data',
        new URLSearchParams({tag, run, content_type, sample, step}));

    const reshapeTo1xNx3 = function (data) {
      const channelsCount = 3;
      let items = [];
      for (let i = 0; i < data.length / channelsCount; i++) {
        let dataEntry = [];
        for (let j = 0; j < channelsCount; j++) {
          dataEntry.push(data[i * channelsCount + j]);
        }
        items.push(dataEntry);
      }
      return items;
    };

    const processData = this._canceller.cancellable(response => {
      if (response.cancelled) {
        return Promise.reject({
          code: vz_mesh.ErrorCodes.CANCELLED,
          message: 'Response was invalidated.'
        });
      }
      let buffer = response.value;
      switch(content_type) {
        case 'VERTEX':
          meshData.vertices = reshapeTo1xNx3(new Float32Array(buffer));
          break;
        case 'FACE':
          meshData.faces = reshapeTo1xNx3(new Int32Array(buffer));
          break;
        case 'COLOR':
          meshData.colors = reshapeTo1xNx3(new Uint8Array(buffer));
          break;
      }
      return meshData;
    });

    return this._requestManager
        .fetch(
            url, null, 'arraybuffer',
            ContentTypeToItemType[content_type])
        .then(response => response.arrayBuffer())
        .then(processData);
  }

  /**
   * Requests new data for each type of metadata from the server.
   * Metadata consists of wall_time, step, tensor shape, content type and other
   * info, but not tensor data itself.
   * @param {!Object} stepDatum Dictionary with mesh data for a current step.
   * @param {string} run Name of the run to get data for.
   * @param {string} tag Name of the tag to get data for.
   * @param {number} sample Sample index from a batch of data.
   * @return {!Object} Joint promise for all requests being sent.
   * @private
   */
  fetchData(stepDatum, run, tag, sample) {
    let promises = [];
    // Map to populate with mesh data, i.e. vertices, faces, etc.
    let meshData = new Map();
    Object.keys(ContentType).forEach(contentType => {
      const component = (1 << ContentType[contentType]);
      if (stepDatum.components & component) {
        promises.push(this._fetchDataByStep(
            run, tag, contentType, sample, stepDatum.step,
            meshData));
      }
    });
    return Promise.all(promises);
  }

  /**
   * Requests new metadata from the server.
   * @param {string} run Name of the run to get data for.
   * @param {string} tag Name of the tag to get data for.
   * @param {number} sample Sample index from a batch of data.
   * @return {!Object} Promise for requested metadata.
   * @private
   */
  _fetchMetadata(run, tag, sample) {
    this._canceller.cancelAll();
    const url = tf_backend.getRouter().pluginRoute(
        'mesh', '/meshes',
        new URLSearchParams({tag, run, sample}));
    const requestData = this._canceller.cancellable(response => {
      if (response.cancelled) {
        return Promise.reject({
          code: vz_mesh.ErrorCodes.CANCELLED,
          message: 'Response was invalidated.'
        });
      }
      return response.value;
    });
    return this._requestManager.fetch(url)
        .then(response => response.json())
        .then(requestData)
        .then(this._processMetadata.bind(this));
  }

  /**
   * Processes raw server data into a frontend-friendly format.
   * @param {!Array|undefined} data List of raw server records.
   * @return {!Array} List of step datums.
   * @private
   */
  _processMetadata(data) {
    if (!data) return;
    const stepToData = new Map();
    for (let i = 0; i < data.length; i++) {
      let dataEntry = data[i];
      if (!stepToData.has(dataEntry.step)) {
        stepToData.set(dataEntry.step, []);
      }
      stepToData.get(dataEntry.step).push(dataEntry);
    }
    let datums = [];
    stepToData.forEach((data) => {
      let datum = this._createStepDatum(data[0]);
      datums.push(datum);
    });
    return datums;
  }

  /**
   * Processes a single row of server-side data and puts it in a more
   * structured form.
   * @param {!Object} metadata Object describing step summary.
   * @private
   * @return {!Object} with wall_time, step number and data for the step.
   */
  _createStepDatum(metadata) {
    return {
      // The wall time within the metadata is in seconds. The Date
      // constructor accepts a time in milliseconds, so we multiply by 1000.
      wall_time: new Date(metadata.wall_time * 1000),
      step: metadata.step,
      config: metadata.config,
      content_type: metadata.content_type,
      components: metadata.components
    };
  }
}

vz_mesh.ArrayBufferDataProvider = ArrayBufferDataProvider;

})(vz_mesh || (vz_mesh = {}));  // end of vz_mesh namespace
/* Copyright 2020 The TensorFlow Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

/**
 * @fileoverview ArrayBufferDataProvider is responsible for making requests to
 * the server, and for receiving and parsing the responses.
 */

// TODO(b/135959734): this class must be refactored into base DataProvider and
// subclass ArrayBufferDataProvider later.
var vz_mesh;
(function(vz_mesh) {

/**
 * Types of errors during network data roundtrip.
 * @enum {number}
 */
vz_mesh.ErrorCodes = {
  CANCELLED: 1  // Happens when the request was cancelled before it finished.
};

/**
 * Types of content displayed by the plugin.
 * @enum {number}
 */
const ContentType = {
  VERTEX: 1,
  FACE: 2,
  COLOR: 3
};

/**
 * Types of content displayed by the plugin mapped to underlying data types.
 * @enum {string}
 */
const ContentTypeToItemType = {
  VERTEX: 'float32',
  FACE: 'int32',
  COLOR: 'uint8'
};

class ArrayBufferDataProvider {

  /**
   * ArrayBufferDataProvider constructor, initializes everything needed for
   * future requests to the server.
   * @param {!Object} requestManager Request manager to communicate with the
   *  server.
   */
  constructor(requestManager) {
    this._requestManager = requestManager;
    this._canceller = new tf_backend.Canceller();
  }

  /**
   * Requests new data from the server.
   */
  reload(run, tag, sample) {
    this._canceller.cancelAll();
    return this._fetchMetadata(run, tag, sample);
  }

  /**
   * Requests new data of some particular type from the server.
   * @param {string} run Name of the run to get data for.
   * @param {string} tag Name of the tag to get data for.
   * @param {string} content_type Type of the content to retrieve.
   * @param {number} sample Sample index from a batch of data.
   * @param {number} step Step value, representing the point in time when the
   *  event occurred.
   * @param {!Object} meshData Map to populate with mesh data.
   * @return {!Object} Promise object representing server request.
   * @private
   */
  _fetchDataByStep(run, tag, content_type, sample, step, meshData) {
    const url = tf_backend.getRouter().pluginRoute(
        'mesh', '/data',
        new URLSearchParams({tag, run, content_type, sample, step}));

    const reshapeTo1xNx3 = function (data) {
      const channelsCount = 3;
      let items = [];
      for (let i = 0; i < data.length / channelsCount; i++) {
        let dataEntry = [];
        for (let j = 0; j < channelsCount; j++) {
          dataEntry.push(data[i * channelsCount + j]);
        }
        items.push(dataEntry);
      }
      return items;
    };

    const processData = this._canceller.cancellable(response => {
      if (response.cancelled) {
        return Promise.reject({
          code: vz_mesh.ErrorCodes.CANCELLED,
          message: 'Response was invalidated.'
        });
      }
      let buffer = response.value;
      switch(content_type) {
        case 'VERTEX':
          meshData.vertices = reshapeTo1xNx3(new Float32Array(buffer));
          break;
        case 'FACE':
          meshData.faces = reshapeTo1xNx3(new Int32Array(buffer));
          break;
        case 'COLOR':
          meshData.colors = reshapeTo1xNx3(new Uint8Array(buffer));
          break;
      }
      return meshData;
    });

    return this._requestManager
        .fetch(
            url, null, 'arraybuffer',
            ContentTypeToItemType[content_type])
        .then(response => response.arrayBuffer())
        .then(processData);
  }

  /**
   * Requests new data for each type of metadata from the server.
   * Metadata consists of wall_time, step, tensor shape, content type and other
   * info, but not tensor data itself.
   * @param {!Object} stepDatum Dictionary with mesh data for a current step.
   * @param {string} run Name of the run to get data for.
   * @param {string} tag Name of the tag to get data for.
   * @param {number} sample Sample index from a batch of data.
   * @return {!Object} Joint promise for all requests being sent.
   * @private
   */
  fetchData(stepDatum, run, tag, sample) {
    let promises = [];
    // Map to populate with mesh data, i.e. vertices, faces, etc.
    let meshData = new Map();
    Object.keys(ContentType).forEach(contentType => {
      const component = (1 << ContentType[contentType]);
      if (stepDatum.components & component) {
        promises.push(this._fetchDataByStep(
            run, tag, contentType, sample, stepDatum.step,
            meshData));
      }
    });
    return Promise.all(promises);
  }

  /**
   * Requests new metadata from the server.
   * @param {string} run Name of the run to get data for.
   * @param {string} tag Name of the tag to get data for.
   * @param {number} sample Sample index from a batch of data.
   * @return {!Object} Promise for requested metadata.
   * @private
   */
  _fetchMetadata(run, tag, sample) {
    this._canceller.cancelAll();
    const url = tf_backend.getRouter().pluginRoute(
        'mesh', '/meshes',
        new URLSearchParams({tag, run, sample}));
    const requestData = this._canceller.cancellable(response => {
      if (response.cancelled) {
        return Promise.reject({
          code: vz_mesh.ErrorCodes.CANCELLED,
          message: 'Response was invalidated.'
        });
      }
      return response.value;
    });
    return this._requestManager.fetch(url)
        .then(response => response.json())
        .then(requestData)
        .then(this._processMetadata.bind(this));
  }

  /**
   * Processes raw server data into a frontend-friendly format.
   * @param {!Array|undefined} data List of raw server records.
   * @return {!Array} List of step datums.
   * @private
   */
  _processMetadata(data) {
    if (!data) return;
    const stepToData = new Map();
    for (let i = 0; i < data.length; i++) {
      let dataEntry = data[i];
      if (!stepToData.has(dataEntry.step)) {
        stepToData.set(dataEntry.step, []);
      }
      stepToData.get(dataEntry.step).push(dataEntry);
    }
    let datums = [];
    stepToData.forEach((data) => {
      let datum = this._createStepDatum(data[0]);
      datums.push(datum);
    });
    return datums;
  }

  /**
   * Processes a single row of server-side data and puts it in a more
   * structured form.
   * @param {!Object} metadata Object describing step summary.
   * @private
   * @return {!Object} with wall_time, step number and data for the step.
   */
  _createStepDatum(metadata) {
    return {
      // The wall time within the metadata is in seconds. The Date
      // constructor accepts a time in milliseconds, so we multiply by 1000.
      wall_time: new Date(metadata.wall_time * 1000),
      step: metadata.step,
      config: metadata.config,
      content_type: metadata.content_type,
      components: metadata.components
    };
  }
}

vz_mesh.ArrayBufferDataProvider = ArrayBufferDataProvider;

})(vz_mesh || (vz_mesh = {}));  // end of vz_mesh namespace
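For intuition, the `reshapeTo1xNx3` helper above regroups a flat buffer of interleaved x, y, z values into rows of three channels; in NumPy terms this is simply `reshape(-1, 3)`. The sketch below is illustrative only and is not part of the plugin (Python is used here for brevity).

```python
# Illustration of what reshapeTo1xNx3 does to a flat xyz buffer:
# regroup consecutive triplets into rows, i.e. numpy's reshape(-1, 3).
import numpy as np

flat = np.array([1, 2, 3, 4, 5, 6], dtype=np.float32)
print(flat.reshape(-1, 3))  # [[1. 2. 3.], [4. 5. 6.]]
```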
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/projects/nasa/lib/utils.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """General helper functions.""" from os import path import numpy as np from skimage import measure import tensorflow.compat.v1 as tf from tensorflow_graphics.projects.cvxnet.lib.libmise import mise from tensorflow_graphics.projects.nasa.lib import datasets from tensorflow_graphics.projects.nasa.lib import models import tensorflow_probability as tfp from tqdm import trange import trimesh tf.disable_eager_execution() tfd = tfp.distributions def define_flags(): """Define command line flags.""" flags = tf.app.flags # Dataset Parameters flags.DEFINE_enum("dataset", "amass", list(k for k in datasets.dataset_dict.keys()), "Name of the dataset.") flags.DEFINE_string("data_dir", None, "Directory to load data from.") flags.mark_flag_as_required("data_dir") flags.DEFINE_integer("sample_bbox", 1024, "Number of bbox samples.") flags.DEFINE_integer("sample_surf", 1024, "Number of surface samples.") flags.DEFINE_integer("batch_size", 12, "Batch size.") flags.DEFINE_integer("motion", 0, "Index of the motion for evaluation.") flags.DEFINE_integer("subject", 0, "Index of the subject for training.") # Model Parameters flags.DEFINE_enum("model", "nasa", list(k for k in models.model_dict.keys()), "Name of the model.") flags.DEFINE_integer("n_parts", 24, "Number of parts.") flags.DEFINE_integer("total_dim", 960, "Dimension of the latent vector (in total).") flags.DEFINE_bool("shared_decoder", False, "Whether to use shared decoder.") flags.DEFINE_float("soft_blend", 5., "The constant to blend parts.") flags.DEFINE_bool("projection", True, "Whether to use projected shape features.") flags.DEFINE_float("level_set", 0.5, "The value of the level_set.") flags.DEFINE_integer("n_dims", 3, "The dimension of the query points.") # Training Parameters flags.DEFINE_float("lr", 1e-4, "Learning rate.") flags.DEFINE_string("train_dir", None, "Training directory.") flags.mark_flag_as_required("train_dir") flags.DEFINE_integer("max_steps", 200000, "Number of optimization steps.") flags.DEFINE_integer("save_every", 5000, "Number of steps to save checkpoint.") flags.DEFINE_integer("summary_every", 500, "Number of steps to save summaries.") flags.DEFINE_float("label_w", 0.5, "Weight of labeled vertices loss.") flags.DEFINE_float("minimal_w", 0.05, "Weight of minimal loss.") flags.DEFINE_bool("use_vert", True, "Whether to use vertices on the mesh for training.") flags.DEFINE_bool("use_joint", True, "Whether to use joint-based transformation.") flags.DEFINE_integer("sample_vert", 2048, "Number of vertex samples.") # Evaluation Parameters flags.DEFINE_bool("gen_mesh_only", False, "Whether to generate meshes only.") # Tracking Parameters flags.DEFINE_float("theta_lr", 5e-4, "Learning rate.") flags.DEFINE_integer("max_steps_per_frame", 1792, "Number of optimization steps for tracking each frame.") flags.DEFINE_enum("gradient_type", "reparam", ["vanilla", "reparam"], "Type of gradient to use in theta optimization.") flags.DEFINE_integer("sample_track_vert", 1024, 
"Number of vertex samples for tracking each frame.") flags.DEFINE_integer("n_noisy_samples", 8, "Number of noisy samples per vertex") flags.DEFINE_float("bandwidth", 1e-2, "Bandwidth of the gaussian noises.") flags.DEFINE_bool( "left_trans", False, "Whether to use left side transformation (True) or right side (False).") flags.DEFINE_string("joint_data", None, "Path to load joint data.") flags.DEFINE_float("glue_w", 20., "Weight of length constraint loss.") flags.DEFINE_float("trans_range", 1., "The range of allowed translations.") def gen_mesh(sess, feed_dict, latent_holder, point_holder, latent, occ, batch_val, hparams, idx=0): """Generating meshes given a trained NASA model.""" scale = 1.1 # Scale of the padded bbox regarding the tight one. level_set = hparams.level_set latent_val = sess.run(latent, feed_dict) mesh_extractor = mise.MISE(32, 3, level_set) points = mesh_extractor.query() gt_verts = batch_val["vert"].reshape([-1, 3]) gt_bbox = np.stack([gt_verts.min(axis=0), gt_verts.max(axis=0)], axis=0) gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5 gt_scale = (gt_bbox[1] - gt_bbox[0]).max() while points.shape[0] != 0: orig_points = points points = points.astype(np.float32) points = (np.expand_dims(points, axis=0) / mesh_extractor.resolution - 0.5) * scale points = points * gt_scale + gt_center n_points = points.shape[1] values = [] for i in range(0, n_points, 100000): # Add this to prevent OOM due to points overload. feed_dict[latent_holder] = latent_val feed_dict[point_holder] = np.expand_dims(points[:, i:i + 100000], axis=1) value = sess.run(occ[:, idx], feed_dict) values.append(value) values = np.concatenate(values, axis=1) values = values[0, :, 0].astype(np.float64) mesh_extractor.update(orig_points, values) points = mesh_extractor.query() value_grid = mesh_extractor.to_dense() try: value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6) verts, faces, normals, unused_var = measure.marching_cubes_lewiner( value_grid, min(level_set, value_grid.max())) del normals verts -= 1 verts /= np.array([ value_grid.shape[0] - 3, value_grid.shape[1] - 3, value_grid.shape[2] - 3 ], dtype=np.float32) verts = scale * (verts - 0.5) verts = verts * gt_scale + gt_center faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1) mesh = trimesh.Trimesh(vertices=verts, faces=faces) return mesh except: # pylint: disable=bare-except return None def save_mesh(sess, feed_dict, latent_holder, point_holder, latent, occ, batch_val, hparams, pth="meshes"): """Generate and save meshes to disk given a trained NASA model.""" name = batch_val["name"][0].decode("utf-8") subject, motion, frame = amass_name_helper(name) pth = path.join(hparams.train_dir, pth, frame) if not tf.io.gfile.isdir(pth): tf.io.gfile.makedirs(pth) start = hparams.n_parts for i in range(start, hparams.n_parts + 1): mesh_model = gen_mesh( sess, feed_dict, latent_holder, point_holder, latent, occ, batch_val, hparams, idx=i) mesh_name = "full_pred.obj" if mesh_model is not None: with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout: mesh_model.export(fout, file_type="obj") return subject, motion, frame, mesh_model def save_pointcloud(data, hparams, pth="pointcloud"): """Save pointcloud to disk.""" name = data["name"][0].decode("utf-8") unused_subject, unused_motion, frame = amass_name_helper(name) pth = path.join(hparams.train_dir, pth, frame) if not tf.io.gfile.isdir(pth): tf.io.gfile.makedirs(pth) mesh_name = "pointcloud.obj" with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout: pointcloud = 
data["vert"].reshape([-1, 3]) for v in pointcloud: fout.write("v {0} {1} {2}\n".format(*v.tolist())) def amass_name_helper(name): name, frame = name.split("-") subject = name[:5] motion = name[6:] return subject, motion, frame def make_summary_feed_dict( iou_hook, iou, best_hook, best_iou, ): feed_dict = {} feed_dict[iou_hook] = iou feed_dict[best_hook] = best_iou return feed_dict def parse_global_step(ckpt): basename = path.basename(ckpt) return int(basename.split("-")[-1]) def compute_iou(sess, feed_dict, latent_holder, point_holder, latent, occ, point, label, hparams): """Compute IoU.""" iou = 0. eps = 1e-9 latent_val = sess.run(latent, feed_dict) n_points = point.shape[2] preds = [] for start in range(0, n_points, 100000): feed_dict[point_holder] = point[:, :, start:start + 100000] feed_dict[latent_holder] = latent_val pred = sess.run(occ, feed_dict) preds.append(pred) pred = np.concatenate(preds, axis=2) pred = (pred >= hparams.level_set).astype(np.float32) label = (label[:, :1] >= 0.5).astype(np.float32).squeeze(axis=1) iou += np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred, label)), eps) return iou def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans, joints, hparams): """Compute the prior term as a glue loss.""" n_dims = hparams.n_dims # Invert the transformation r_inv = inv_transforms[..., :n_dims, :n_dims] t_inv = inv_transforms[..., :n_dims, -1:] r = tf.transpose(r_inv, [0, 2, 1]) t = -tf.matmul(r, t_inv) transforms = tf.concat( [tf.concat([r, t], axis=-1), inv_transforms[..., -1:, :]], axis=-2) transforms = tf.matmul(transforms, inv_first_frame_trans) # Compute transformations of father joints and apply it to vectors from frame0 father_transforms = tf.reduce_sum( tf.expand_dims(transforms, axis=1) * connect.reshape([hparams.n_parts, hparams.n_parts, 1, 1]), axis=0) end_pts_homo = tf.expand_dims( tf.concat([end_pts, tf.ones_like(end_pts[..., :1])], axis=-1), axis=-1) end_pts_transformed = tf.matmul(father_transforms, end_pts_homo) end_pts_transformed = tf.squeeze(end_pts_transformed, axis=-1)[..., :n_dims] # Compute vectors in current configuration pred_links = tf.reshape(joints, [hparams.n_parts, n_dims]) # Compute distance between links and transformed vectors return tf.reduce_sum(tf.square(pred_links - end_pts_transformed)) def vanilla_theta_gradient(model_fn, batch_holder, hparams): """A vanilla gradient estimator for the pose, theta.""" latent_holder, latent, occ_eval = model_fn(batch_holder, None, None, "gen_mesh") if hparams.sample_vert > 0: points = batch_holder["point"] weights = batch_holder["weight"] n_vert = tf.shape(points)[2] sample_indices = tf.random.uniform([1, 1, hparams.sample_vert], minval=0, maxval=n_vert, dtype=tf.int32) points = tf.gather(points, sample_indices, axis=2, batch_dims=2) weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2) batch_holder["point"] = points batch_holder["weight"] = weights unused_var0, unused_var1, occ = model_fn(batch_holder, None, None, "gen_mesh") return latent_holder, latent, occ_eval, tf.reduce_mean( tf.square(occ - hparams.level_set)) def reparam_theta_gradient(model_fn, batch_holder, hparams): """A gradient estimaor for the pose, theta, using the reparam trick.""" sigma = hparams.bandwidth n_samples = hparams.n_noisy_samples latent_holder, latent, occ_eval = model_fn(batch_holder, None, None, "gen_mesh") if hparams.sample_vert > 0: points = batch_holder["point"] weights = batch_holder["weight"] n_vert = tf.shape(points)[2] sample_indices = tf.random.uniform([1, 1, 
hparams.sample_vert], minval=0, maxval=n_vert, dtype=tf.int32) points = tf.gather(points, sample_indices, axis=2, batch_dims=2) weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2) batch_holder["point"] = points batch_holder["weight"] = weights dist = tfd.Normal(loc=0., scale=sigma) n_pts = hparams.sample_vert if hparams.sample_vert > 0 else hparams.n_vert noises = dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims)) unused_var0, unused_var1, occ = model_fn(batch_holder, noises, None, "gen_mesh") occ = tf.reshape(occ, [1, hparams.n_parts + 1, -1, n_samples, 1]) occ = tf.reduce_mean(occ[:, hparams.n_parts:], axis=3) return latent_holder, latent, occ_eval, tf.reduce_mean( tf.square(occ - hparams.level_set)) def optimize_theta(feed_dict, loss, reset_op, train_op, rec_loss, glue_loss, sess, k, hparams): """Optimize the pose, theta, during tracking.""" sess.run(reset_op) loss_val = 0 glue_val = 0 with trange(hparams.max_steps_per_frame) as t: for unused_i in t: loss_val, unused_var, rec_val, glue_val = sess.run( [loss, train_op, rec_loss, glue_loss], feed_dict) t.set_description("Frame_{0} {1:.4f}|{2:.4f}".format( k, rec_val, glue_val)) return loss_val, glue_val
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """General helper functions.""" from os import path import numpy as np from skimage import measure import tensorflow.compat.v1 as tf from tensorflow_graphics.projects.cvxnet.lib.libmise import mise from tensorflow_graphics.projects.nasa.lib import datasets from tensorflow_graphics.projects.nasa.lib import models import tensorflow_probability as tfp from tqdm import trange import trimesh tf.disable_eager_execution() tfd = tfp.distributions def define_flags(): """Define command line flags.""" flags = tf.app.flags # Dataset Parameters flags.DEFINE_enum("dataset", "amass", list(k for k in datasets.dataset_dict.keys()), "Name of the dataset.") flags.DEFINE_string("data_dir", None, "Directory to load data from.") flags.mark_flag_as_required("data_dir") flags.DEFINE_integer("sample_bbox", 1024, "Number of bbox samples.") flags.DEFINE_integer("sample_surf", 1024, "Number of surface samples.") flags.DEFINE_integer("batch_size", 12, "Batch size.") flags.DEFINE_integer("motion", 0, "Index of the motion for evaluation.") flags.DEFINE_integer("subject", 0, "Index of the subject for training.") # Model Parameters flags.DEFINE_enum("model", "nasa", list(k for k in models.model_dict.keys()), "Name of the model.") flags.DEFINE_integer("n_parts", 24, "Number of parts.") flags.DEFINE_integer("total_dim", 960, "Dimension of the latent vector (in total).") flags.DEFINE_bool("shared_decoder", False, "Whether to use shared decoder.") flags.DEFINE_float("soft_blend", 5., "The constant to blend parts.") flags.DEFINE_bool("projection", True, "Whether to use projected shape features.") flags.DEFINE_float("level_set", 0.5, "The value of the level_set.") flags.DEFINE_integer("n_dims", 3, "The dimension of the query points.") # Training Parameters flags.DEFINE_float("lr", 1e-4, "Learning rate.") flags.DEFINE_string("train_dir", None, "Training directory.") flags.mark_flag_as_required("train_dir") flags.DEFINE_integer("max_steps", 200000, "Number of optimization steps.") flags.DEFINE_integer("save_every", 5000, "Number of steps to save checkpoint.") flags.DEFINE_integer("summary_every", 500, "Number of steps to save summaries.") flags.DEFINE_float("label_w", 0.5, "Weight of labeled vertices loss.") flags.DEFINE_float("minimal_w", 0.05, "Weight of minimal loss.") flags.DEFINE_bool("use_vert", True, "Whether to use vertices on the mesh for training.") flags.DEFINE_bool("use_joint", True, "Whether to use joint-based transformation.") flags.DEFINE_integer("sample_vert", 2048, "Number of vertex samples.") # Evaluation Parameters flags.DEFINE_bool("gen_mesh_only", False, "Whether to generate meshes only.") # Tracking Parameters flags.DEFINE_float("theta_lr", 5e-4, "Learning rate.") flags.DEFINE_integer("max_steps_per_frame", 1792, "Number of optimization steps for tracking each frame.") flags.DEFINE_enum("gradient_type", "reparam", ["vanilla", "reparam"], "Type of gradient to use in theta optimization.") flags.DEFINE_integer("sample_track_vert", 1024, 
"Number of vertex samples for tracking each frame.") flags.DEFINE_integer("n_noisy_samples", 8, "Number of noisy samples per vertex") flags.DEFINE_float("bandwidth", 1e-2, "Bandwidth of the gaussian noises.") flags.DEFINE_bool( "left_trans", False, "Whether to use left side transformation (True) or right side (False).") flags.DEFINE_string("joint_data", None, "Path to load joint data.") flags.DEFINE_float("glue_w", 20., "Weight of length constraint loss.") flags.DEFINE_float("trans_range", 1., "The range of allowed translations.") def gen_mesh(sess, feed_dict, latent_holder, point_holder, latent, occ, batch_val, hparams, idx=0): """Generating meshes given a trained NASA model.""" scale = 1.1 # Scale of the padded bbox regarding the tight one. level_set = hparams.level_set latent_val = sess.run(latent, feed_dict) mesh_extractor = mise.MISE(32, 3, level_set) points = mesh_extractor.query() gt_verts = batch_val["vert"].reshape([-1, 3]) gt_bbox = np.stack([gt_verts.min(axis=0), gt_verts.max(axis=0)], axis=0) gt_center = (gt_bbox[0] + gt_bbox[1]) * 0.5 gt_scale = (gt_bbox[1] - gt_bbox[0]).max() while points.shape[0] != 0: orig_points = points points = points.astype(np.float32) points = (np.expand_dims(points, axis=0) / mesh_extractor.resolution - 0.5) * scale points = points * gt_scale + gt_center n_points = points.shape[1] values = [] for i in range(0, n_points, 100000): # Add this to prevent OOM due to points overload. feed_dict[latent_holder] = latent_val feed_dict[point_holder] = np.expand_dims(points[:, i:i + 100000], axis=1) value = sess.run(occ[:, idx], feed_dict) values.append(value) values = np.concatenate(values, axis=1) values = values[0, :, 0].astype(np.float64) mesh_extractor.update(orig_points, values) points = mesh_extractor.query() value_grid = mesh_extractor.to_dense() try: value_grid = np.pad(value_grid, 1, "constant", constant_values=-1e6) verts, faces, normals, unused_var = measure.marching_cubes_lewiner( value_grid, min(level_set, value_grid.max())) del normals verts -= 1 verts /= np.array([ value_grid.shape[0] - 3, value_grid.shape[1] - 3, value_grid.shape[2] - 3 ], dtype=np.float32) verts = scale * (verts - 0.5) verts = verts * gt_scale + gt_center faces = np.stack([faces[..., 1], faces[..., 0], faces[..., 2]], axis=-1) mesh = trimesh.Trimesh(vertices=verts, faces=faces) return mesh except: # pylint: disable=bare-except return None def save_mesh(sess, feed_dict, latent_holder, point_holder, latent, occ, batch_val, hparams, pth="meshes"): """Generate and save meshes to disk given a trained NASA model.""" name = batch_val["name"][0].decode("utf-8") subject, motion, frame = amass_name_helper(name) pth = path.join(hparams.train_dir, pth, frame) if not tf.io.gfile.isdir(pth): tf.io.gfile.makedirs(pth) start = hparams.n_parts for i in range(start, hparams.n_parts + 1): mesh_model = gen_mesh( sess, feed_dict, latent_holder, point_holder, latent, occ, batch_val, hparams, idx=i) mesh_name = "full_pred.obj" if mesh_model is not None: with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout: mesh_model.export(fout, file_type="obj") return subject, motion, frame, mesh_model def save_pointcloud(data, hparams, pth="pointcloud"): """Save pointcloud to disk.""" name = data["name"][0].decode("utf-8") unused_subject, unused_motion, frame = amass_name_helper(name) pth = path.join(hparams.train_dir, pth, frame) if not tf.io.gfile.isdir(pth): tf.io.gfile.makedirs(pth) mesh_name = "pointcloud.obj" with tf.io.gfile.GFile(path.join(pth, mesh_name), "w") as fout: pointcloud = 
data["vert"].reshape([-1, 3]) for v in pointcloud: fout.write("v {0} {1} {2}\n".format(*v.tolist())) def amass_name_helper(name): name, frame = name.split("-") subject = name[:5] motion = name[6:] return subject, motion, frame def make_summary_feed_dict( iou_hook, iou, best_hook, best_iou, ): feed_dict = {} feed_dict[iou_hook] = iou feed_dict[best_hook] = best_iou return feed_dict def parse_global_step(ckpt): basename = path.basename(ckpt) return int(basename.split("-")[-1]) def compute_iou(sess, feed_dict, latent_holder, point_holder, latent, occ, point, label, hparams): """Compute IoU.""" iou = 0. eps = 1e-9 latent_val = sess.run(latent, feed_dict) n_points = point.shape[2] preds = [] for start in range(0, n_points, 100000): feed_dict[point_holder] = point[:, :, start:start + 100000] feed_dict[latent_holder] = latent_val pred = sess.run(occ, feed_dict) preds.append(pred) pred = np.concatenate(preds, axis=2) pred = (pred >= hparams.level_set).astype(np.float32) label = (label[:, :1] >= 0.5).astype(np.float32).squeeze(axis=1) iou += np.sum(pred * label) / np.maximum(np.sum(np.maximum(pred, label)), eps) return iou def compute_glue_loss(connect, end_pts, inv_transforms, inv_first_frame_trans, joints, hparams): """Compute the prior term as a glue loss.""" n_dims = hparams.n_dims # Invert the transformation r_inv = inv_transforms[..., :n_dims, :n_dims] t_inv = inv_transforms[..., :n_dims, -1:] r = tf.transpose(r_inv, [0, 2, 1]) t = -tf.matmul(r, t_inv) transforms = tf.concat( [tf.concat([r, t], axis=-1), inv_transforms[..., -1:, :]], axis=-2) transforms = tf.matmul(transforms, inv_first_frame_trans) # Compute transformations of father joints and apply it to vectors from frame0 father_transforms = tf.reduce_sum( tf.expand_dims(transforms, axis=1) * connect.reshape([hparams.n_parts, hparams.n_parts, 1, 1]), axis=0) end_pts_homo = tf.expand_dims( tf.concat([end_pts, tf.ones_like(end_pts[..., :1])], axis=-1), axis=-1) end_pts_transformed = tf.matmul(father_transforms, end_pts_homo) end_pts_transformed = tf.squeeze(end_pts_transformed, axis=-1)[..., :n_dims] # Compute vectors in current configuration pred_links = tf.reshape(joints, [hparams.n_parts, n_dims]) # Compute distance between links and transformed vectors return tf.reduce_sum(tf.square(pred_links - end_pts_transformed)) def vanilla_theta_gradient(model_fn, batch_holder, hparams): """A vanilla gradient estimator for the pose, theta.""" latent_holder, latent, occ_eval = model_fn(batch_holder, None, None, "gen_mesh") if hparams.sample_vert > 0: points = batch_holder["point"] weights = batch_holder["weight"] n_vert = tf.shape(points)[2] sample_indices = tf.random.uniform([1, 1, hparams.sample_vert], minval=0, maxval=n_vert, dtype=tf.int32) points = tf.gather(points, sample_indices, axis=2, batch_dims=2) weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2) batch_holder["point"] = points batch_holder["weight"] = weights unused_var0, unused_var1, occ = model_fn(batch_holder, None, None, "gen_mesh") return latent_holder, latent, occ_eval, tf.reduce_mean( tf.square(occ - hparams.level_set)) def reparam_theta_gradient(model_fn, batch_holder, hparams): """A gradient estimaor for the pose, theta, using the reparam trick.""" sigma = hparams.bandwidth n_samples = hparams.n_noisy_samples latent_holder, latent, occ_eval = model_fn(batch_holder, None, None, "gen_mesh") if hparams.sample_vert > 0: points = batch_holder["point"] weights = batch_holder["weight"] n_vert = tf.shape(points)[2] sample_indices = tf.random.uniform([1, 1, 
hparams.sample_vert], minval=0, maxval=n_vert, dtype=tf.int32) points = tf.gather(points, sample_indices, axis=2, batch_dims=2) weights = tf.gather(weights, sample_indices, axis=2, batch_dims=2) batch_holder["point"] = points batch_holder["weight"] = weights dist = tfd.Normal(loc=0., scale=sigma) n_pts = hparams.sample_vert if hparams.sample_vert > 0 else hparams.n_vert noises = dist.sample((1, hparams.n_parts, n_pts, n_samples, hparams.n_dims)) unused_var0, unused_var1, occ = model_fn(batch_holder, noises, None, "gen_mesh") occ = tf.reshape(occ, [1, hparams.n_parts + 1, -1, n_samples, 1]) occ = tf.reduce_mean(occ[:, hparams.n_parts:], axis=3) return latent_holder, latent, occ_eval, tf.reduce_mean( tf.square(occ - hparams.level_set)) def optimize_theta(feed_dict, loss, reset_op, train_op, rec_loss, glue_loss, sess, k, hparams): """Optimize the pose, theta, during tracking.""" sess.run(reset_op) loss_val = 0 glue_val = 0 with trange(hparams.max_steps_per_frame) as t: for unused_i in t: loss_val, unused_var, rec_val, glue_val = sess.run( [loss, train_op, rec_loss, glue_loss], feed_dict) t.set_description("Frame_{0} {1:.4f}|{2:.4f}".format( k, rec_val, glue_val)) return loss_val, glue_val
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/datasets/modelnet40/modelnet40_show.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Visualization in 3D of modelnet40 dataset. See: https://www.tensorflow.org/datasets/api_docs/python/tfds/load """ from absl import app import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # pylint:disable=unused-import from tensorflow_graphics.datasets.modelnet40 import ModelNet40 def main(_): ds_train, _ = ModelNet40.load( split="train", data_dir="~/tensorflow_dataset", with_info=True) for example in ds_train.take(1): points = example["points"] label = example["label"] fig = plt.figure() ax3 = fig.add_subplot(111, projection="3d") ax3.set_title("Example with label {}".format(label)) scatter3 = lambda p, c="r", *args: ax3.scatter(p[:, 0], p[:, 1], p[:, 2], c) scatter3(points) if __name__ == "__main__": app.run(main)
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Visualization in 3D of modelnet40 dataset. See: https://www.tensorflow.org/datasets/api_docs/python/tfds/load """ from absl import app import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # pylint:disable=unused-import from tensorflow_graphics.datasets.modelnet40 import ModelNet40 def main(_): ds_train, _ = ModelNet40.load( split="train", data_dir="~/tensorflow_dataset", with_info=True) for example in ds_train.take(1): points = example["points"] label = example["label"] fig = plt.figure() ax3 = fig.add_subplot(111, projection="3d") ax3.set_title("Example with label {}".format(label)) scatter3 = lambda p, c="r", *args: ax3.scatter(p[:, 0], p[:, 1], p[:, 2], c) scatter3(points) if __name__ == "__main__": app.run(main)
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/util/doc.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Query environment variable for documentation building.""" import os def _import_tfg_docs(): """Checks if __init__.py imports should be executed (for building docs).""" return os.getenv("TFG_DOC_IMPORTS", "0") == "1" def enable_tfg_doc_imports(): """Re-enables the imports in the __init__.py so that docs can be built.""" os.environ["TFG_DOC_IMPORTS"] = "1"
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Query environment variable for documentation building.""" import os def _import_tfg_docs(): """Checks if __init__.py imports should be executed (for building docs).""" return os.getenv("TFG_DOC_IMPORTS", "0") == "1" def enable_tfg_doc_imports(): """Re-enables the imports in the __init__.py so that docs can be built.""" os.environ["TFG_DOC_IMPORTS"] = "1"
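# The two helpers above gate documentation-only imports behind the
# TFG_DOC_IMPORTS environment variable. The snippet below is a hedged,
# hypothetical sketch of how a package __init__.py might consume them; the
# guarded import pattern is an assumption for illustration, not code taken
# from the repository.
from tensorflow_graphics.util import doc as _doc

if _doc._import_tfg_docs():
  # Submodule imports run only while documentation is being built,
  # i.e. when TFG_DOC_IMPORTS is set to "1".
  from tensorflow_graphics.datasets.modelnet40 import ModelNet40  # noqa: F401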
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/projects/local_implicit_grid/reconstruct_geometry.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Reconstruct scene using LIG. """ import os import warnings from absl import app from absl import flags import numpy as np from tensorflow.compat.v1.io import gfile from tensorflow_graphics.projects.local_implicit_grid.core import point_utils as pu from tensorflow_graphics.projects.local_implicit_grid.core import postprocess from tensorflow_graphics.projects.local_implicit_grid.core import reconstruction as rec import trimesh os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' flags.DEFINE_string('input_ply', '', 'Input point sample ply file.') flags.DEFINE_string('output_ply', '', 'Reconstructed scene ply file.') flags.DEFINE_integer('steps', 10000, 'Number of optimization steps.') flags.DEFINE_integer('npoints', 10000, 'Number of points to sample per iteration during optimization.') flags.DEFINE_float('part_size', 0.25, 'Size of parts per cell (meters).') flags.DEFINE_float('init_std', 0.02, 'Initial std to draw random code from.') flags.DEFINE_integer('res_per_part', 0, 'Evaluation resolution per part. A higher value produces a ' 'finer output mesh. 0 to use default value. ' 'Recommended value: 8, 16 or 32.') flags.DEFINE_boolean('overlap', True, 'Use overlapping latent grids.') flags.DEFINE_boolean('postprocess', True, 'Post process to remove backfaces.') flags.DEFINE_string('ckpt_dir', 'pretrained_ckpt', 'Checkpoint directory.') FLAGS = flags.FLAGS def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') if not FLAGS.input_ply: raise IOError('--input_ply must be specified.') if not FLAGS.output_ply: FLAGS.output_ply = FLAGS.input_ply.replace('.ply', '.reconstruct.ply') # load point cloud from ply file v, n = pu.read_point_ply(FLAGS.input_ply) # check if part size is too large min_bb = np.min(np.max(v, axis=0) - np.min(v, axis=0)) if FLAGS.part_size > 0.25 * min_bb: warnings.warn( 'WARNING: part_size seems too large. 
Recommend using a part_size < ' '{:.2f} for this shape.'.format(0.25 * min_bb), UserWarning) surface_points = np.concatenate([v, n], axis=1) near_surface_samples = rec.get_in_out_from_ray( surface_points, sample_factor=10, std=0.01) xmin = np.min(surface_points[:, :3], 0) xmax = np.max(surface_points[:, :3], 0) # add some extra slack to xmin and xmax xmin -= FLAGS.part_size xmax += FLAGS.part_size if FLAGS.res_per_part == 0: res_per_part = int(64*FLAGS.part_size) else: res_per_part = FLAGS.res_per_part npts = min(near_surface_samples.shape[0], FLAGS.npoints)-1 print('Performing latent grid optimization...') v, f, _, _ = rec.encode_decoder_one_scene( near_surface_samples, FLAGS.ckpt_dir, FLAGS.part_size, overlap=True, indep_pt_loss=True, init_std=FLAGS.init_std, xmin=xmin, xmax=xmax, res_per_part=res_per_part, npts=npts, steps=FLAGS.steps) out_dir = os.path.dirname(FLAGS.output_ply) if out_dir and not gfile.exists(out_dir): gfile.makedirs(out_dir) mesh = trimesh.Trimesh(v, f) if FLAGS.postprocess: print('Postprocessing generated mesh...') mesh = postprocess.remove_backface(mesh, surface_points) print('Writing reconstructed mesh to {}'.format(FLAGS.output_ply)) with gfile.GFile(FLAGS.output_ply, 'wb') as fh: mesh.export(fh, 'ply') if __name__ == '__main__': app.run(main)
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Reconstruct scene using LIG. """ import os import warnings from absl import app from absl import flags import numpy as np from tensorflow.compat.v1.io import gfile from tensorflow_graphics.projects.local_implicit_grid.core import point_utils as pu from tensorflow_graphics.projects.local_implicit_grid.core import postprocess from tensorflow_graphics.projects.local_implicit_grid.core import reconstruction as rec import trimesh os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' flags.DEFINE_string('input_ply', '', 'Input point sample ply file.') flags.DEFINE_string('output_ply', '', 'Reconstructed scene ply file.') flags.DEFINE_integer('steps', 10000, 'Number of optimization steps.') flags.DEFINE_integer('npoints', 10000, 'Number of points to sample per iteration during optimization.') flags.DEFINE_float('part_size', 0.25, 'Size of parts per cell (meters).') flags.DEFINE_float('init_std', 0.02, 'Initial std to draw random code from.') flags.DEFINE_integer('res_per_part', 0, 'Evaluation resolution per part. A higher value produces a ' 'finer output mesh. 0 to use default value. ' 'Recommended value: 8, 16 or 32.') flags.DEFINE_boolean('overlap', True, 'Use overlapping latent grids.') flags.DEFINE_boolean('postprocess', True, 'Post process to remove backfaces.') flags.DEFINE_string('ckpt_dir', 'pretrained_ckpt', 'Checkpoint directory.') FLAGS = flags.FLAGS def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') if not FLAGS.input_ply: raise IOError('--input_ply must be specified.') if not FLAGS.output_ply: FLAGS.output_ply = FLAGS.input_ply.replace('.ply', '.reconstruct.ply') # load point cloud from ply file v, n = pu.read_point_ply(FLAGS.input_ply) # check if part size is too large min_bb = np.min(np.max(v, axis=0) - np.min(v, axis=0)) if FLAGS.part_size > 0.25 * min_bb: warnings.warn( 'WARNING: part_size seems too large. 
Recommend using a part_size < ' '{:.2f} for this shape.'.format(0.25 * min_bb), UserWarning) surface_points = np.concatenate([v, n], axis=1) near_surface_samples = rec.get_in_out_from_ray( surface_points, sample_factor=10, std=0.01) xmin = np.min(surface_points[:, :3], 0) xmax = np.max(surface_points[:, :3], 0) # add some extra slack to xmin and xmax xmin -= FLAGS.part_size xmax += FLAGS.part_size if FLAGS.res_per_part == 0: res_per_part = int(64*FLAGS.part_size) else: res_per_part = FLAGS.res_per_part npts = min(near_surface_samples.shape[0], FLAGS.npoints)-1 print('Performing latent grid optimization...') v, f, _, _ = rec.encode_decoder_one_scene( near_surface_samples, FLAGS.ckpt_dir, FLAGS.part_size, overlap=True, indep_pt_loss=True, init_std=FLAGS.init_std, xmin=xmin, xmax=xmax, res_per_part=res_per_part, npts=npts, steps=FLAGS.steps) out_dir = os.path.dirname(FLAGS.output_ply) if out_dir and not gfile.exists(out_dir): gfile.makedirs(out_dir) mesh = trimesh.Trimesh(v, f) if FLAGS.postprocess: print('Postprocessing generated mesh...') mesh = postprocess.remove_backface(mesh, surface_points) print('Writing reconstructed mesh to {}'.format(FLAGS.output_ply)) with gfile.GFile(FLAGS.output_ply, 'wb') as fh: mesh.export(fh, 'ply') if __name__ == '__main__': app.run(main)
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/datasets/modelnet40/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """`tensorflow_graphics.datasets.modelnet40` module.""" from tensorflow_graphics.datasets.modelnet40.modelnet40 import ModelNet40 __all__ = [ "ModelNet40", ]
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """`tensorflow_graphics.datasets.modelnet40` module.""" from tensorflow_graphics.datasets.modelnet40.modelnet40 import ModelNet40 __all__ = [ "ModelNet40", ]
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./CONTRIBUTING.md
# How to Contribute We'd love to accept your patches and contributions to this project. There are just a few small guidelines you need to follow. ## Contributor License Agreement Contributions to this project must be accompanied by a Contributor License Agreement. You (or your employer) retain the copyright to your contribution; this simply gives us permission to use and redistribute your contributions as part of the project. Head over to <https://cla.developers.google.com/> to see your current agreements on file or to sign a new one. You generally only need to submit a CLA once, so if you've already submitted one (even if it was for a different project), you probably don't need to do it again. ## Code reviews All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests. ## What features to add? The library is open to any contributions along the lines of computer graphics, with the top-level themes being rendering, physics simulation, and geometry processing. Contributions can be in the form of low-level functions (the majority of the library), neural network layers, or Colaboratory notebooks. ## Guidelines for TensorFlow operations TensorFlow Graphics follows the TensorFlow [contributor guidelines](https://www.tensorflow.org/community/contribute) and [code style guide](https://www.tensorflow.org/community/contribute/code_style). Besides these, TensorFlow Graphics has a few more guidelines which you can find below. ### Programming languages Unless this comes at a significant performance hit, pure Python is preferred. ### Structure of a function The general structure of a function should be as follows (a minimal sketch illustrating this structure appears after the Input shapes section below): * Name of the function followed by inputs to that function * Doc-string documentation * Definition of the scope using `tf.compat.v1.name_scope` * Functions that take tensors as arguments should call `tf.convert_to_tensor` * Checking the shape and value of the inputs as necessary * Main logic of the function ### Function names Prefer function names that are concise, descriptive, and integrate well with the module name when called. For instance, the `rotate` function from the `rotation_matrix_3d` sub-module can be called using `rotation_matrix_3d.rotate`, and makes it easy for anyone to understand what is being calculated. Functions that are only meant to be local to the file in which they are written should have an underscore before their name. ### Input parameters The first arguments should be tensors, followed by Python parameters, and finally the name scope for the TensorFlow operation. ### Input shapes * The first dimensions of a tensor should represent the shape of the batch, and the last dimensions should represent the core shape of the elements used by the function. For instance, `rotation_matrix_3d.rotate` accepts rotation matrices of shape `[A1, ..., An, 3, 3]` where `[A1, ..., An]` are the optional batch dimensions, and `[3, 3]` is the shape required to capture 3D rotation matrices. * Every function must support batch dimensions of any shape, including tensors with no batch dimensions. * For input tensors with common batch shapes, document whether they can be broadcast compatible or not, and try to make them compatible when possible by, for instance, using `shape.get_broadcasted_shape` and `tf.broadcast_to`. 
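To make the function structure and shape conventions above concrete, here is a minimal sketch; `scale_points` is an invented illustration that follows these guidelines, not a function from the library:

```python
import tensorflow as tf

from tensorflow_graphics.util import shape


def scale_points(points, factor, name="scale_points"):
  """Scales 3d points by a (possibly batched) factor.

  Args:
    points: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
      represents a 3d point.
    factor: A tensor of shape `[A1, ..., An, 1]`, or a Python float.
    name: A name for this op.

  Returns:
    A tensor of shape `[A1, ..., An, 3]` containing the scaled points.
  """
  with tf.compat.v1.name_scope(name):
    points = tf.convert_to_tensor(value=points)
    factor = tf.convert_to_tensor(value=factor, dtype=points.dtype)
    shape.check_static(
        tensor=points, tensor_name="points", has_dim_equals=(-1, 3))
    # Main logic: broadcasting keeps the function agnostic to batch shape.
    return points * factor
```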
### Documentation Every function must have a docstring-type documentation describing what the function is performing, its arguments, and what is returned. The input sizes must be written between backquotes with batch dimensions indexed by letters and numbers, for instance: \`[A1, ..., An, 3]\`. Here `[A1, ..., An]` are the batch dimensions, and 3 is the intrinsic dimension required for the operation (e.g. a point in 3d). Prefer to put the batch dimension first. ### Error handling Handling unexpected inputs usually consists in checking that their shapes are consistent with expectations, which can be performed with `shape.check_static`, but also checking that the content of the tensors are valid (e.g. value in a specific range, no NaNs etc.), which can be performed with utilities provided in the `asserts` module. ### Differentiability and stable gradients There are several TF operations that can turn derivatives to zero at unintended points of your functions / operations. This can be avoided by using tools provided in the util.safe_ops module. If it can not be avoided, make sure to add tests checking the Jacobians of the function at the potentially discontinuous points of the function. See [Testing Jacobians](#testing-jacobians) below. Examples of such functions include: * tf.maximum / tf.minimum(a(x), b(x)): These create piecewise functions, which means derivatives can be discontinuous or zero for some ranges or points. * tf.clip_by_value / tf.clip_by_norm: These are also piecewise functions where the actual function is replaced with a constant piece for certain points or ranges, which makes the derivative zero, even if it actually isn’t. * tf.where(cond, a, b): This is another way of creating piecewise functions. This should be used only if it is really meant to create a piecewise function. The util.safe_ops submodule contains helper functions that can resolve issues with divisions by zero, but also helpers to ensure that the data is in the appropriate range. For instance a dot product of two normalized vectors can result in values outside of [-1.0, 1.0] range due to fixed point arithmetic. This in turn may result in NaN if used with arcsin or arccos. In such cases, safe_shrink in util.safe_ops should be used rather than clipping the range, since clipping removes derivatives which should be non-zero at these points. Cases involving zero divided by zero are a bit more involved and require dedicated workarounds. ### Software compatibility The library is intended to be compatible with the latest stable TensorFlow 1 release as well as the latest nightly package for TensorFlow 2. We also aim to be compatible with a couple of versions of Python. Testing for all the above is automatically performed using [travis](https://travis-ci.org/tensorflow/graphics). ### Hardware compatibility Except for performance reasons, every function must be hardware agnostic (e.g. CPU / GPU / TPU). ### Python modules Each module must contain a \_\_init__.py file which lists all the sub-modules it contains. ## Tests Testing code is essential to make the library usable by everyone at all times. In the following, we will briefly review our policies around unit testing and code coverage. ### Unit testing * all test classes must derive from tensorflow_graphics.util.test_case.TestCase * to improve readability of the code, and minimize duplication, the parameters passed to all the test functions described below are passed using `parameterized.parameters` provided by `absl.testing`. 
#### Test files Each module containing code has associated tests in the module's test sub-folder. Each test sub-folder must contain an empty \_\_init__.py, and one file per .py file in the module. For instance, if the `transformation` module contains `quaternion.py`, the tests associated with that Python file should be located in `transformation/tests/quaternion_test.py`. In the following, we use FN as shorthand for the name of the function to be tested. Let's now have a look at how tests are structured and specific things to test for. #### Structure of a test TensorFlow Graphics follows the arrange-act-assert testing pattern. Moreover, if multiple tests are used in a single function to test for different but similar behavior, self.subTest should be used to create separate blocks. #### Testing return values The function names and behavior to use for testing return values are as follows: * `test_FN_random` to ensure that functions return the expected result for any valid input. * `test_FN_preset` to test specific inputs, and to make sure that corner cases are handled appropriately. #### Error handling Following are the function names and behavior to use for testing that errors are handled appropriately: * `test_FN_exception_raised` to test that functions return the expected error messages when input parameters are invalid (e.g. shape or values). * `test_FN_exception_not_raised` to make sure that valid arguments do not raise any errors. N.B.: For both test functions above, make sure to include `None` in some of the input shapes. #### Testing Jacobians Derivatives and gradients being at the core of Deep Learning training algorithms, testing for the stability and correctness of gradients is core to prevent problems, especially while training large networks. We perform numerical differentiation to ensure the correctness and stability of the Jacobians of any function by defining: * `test_FN_jacobian_random` to ensure that Jacobians are correct on the whole input domain. * `test_FN_jacobian_preset` to test the stability of Jacobian around corner cases, or points where the function might not be smooth / continuous. N.B.: For both test functions above, make sure to decorate them with `@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)` to avoid potential errors arising due to finite differentiation (e.g. a tensor not being normalized anymore). A sketch of such a test follows at the end of this guide. ### Coverage The GitHub mirror of TensorFlow Graphics uses <a href="https://coveralls.io/">coveralls</a> to assess the test coverage. The version of TensorFlow Graphics that is internal to Google contains the same features as what is available on GitHub, but has access to more tools for testing. For this project, our internal policy is to only submit code for which our internal testing tools report at least 99% coverage. This number might seem to be a steep requirement, but given the nature of the project, this is obtained with reasonable efforts.
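As a sketch of the Jacobian-testing conventions above (hedged: `my_fn` is a placeholder for the function under test, not a real library member):

```python
from absl.testing import flagsaver
import numpy as np

from tensorflow_graphics.util import test_case
# Placeholder import for the function under test, e.g.:
# from tensorflow_graphics.my_module import my_fn


class MyFnTest(test_case.TestCase):

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_my_fn_jacobian_random(self):
    """Tests that the Jacobian of my_fn is correct on random inputs."""
    x_init = np.random.uniform(size=(2, 4, 3))
    self.assert_jacobian_is_correct_fn(my_fn, [x_init], atol=1e-5)
```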
# How to Contribute We'd love to accept your patches and contributions to this project. There are just a few small guidelines you need to follow. ## Contributor License Agreement Contributions to this project must be accompanied by a Contributor License Agreement. You (or your employer) retain the copyright to your contribution; this simply gives us permission to use and redistribute your contributions as part of the project. Head over to <https://cla.developers.google.com/> to see your current agreements on file or to sign a new one. You generally only need to submit a CLA once, so if you've already submitted one (even if it was for a different project), you probably don't need to do it again. ## Code reviews All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests. ## What features to add? The library is open to any contributions along the lines of computer graphics, with the top-level themes being rendering, physics simulation, and geometry processing. Contributions can be in the form of low-level functions (the majority of the library), neural network layers, or Colaboratory notebooks. ## Guidelines for TensorFlow operations TensorFlow Graphics follows the TensorFlow [contributor guidelines](https://www.tensorflow.org/community/contribute) and [code style guide](https://www.tensorflow.org/community/contribute/code_style). Besides these, TensorFlow Graphics has a few more guidelines which you can find below. ### Programming languages Unless this comes at a significant performance hit, pure Python is preferred. ### Structure of a function The general structure of a function should be as follows: * Name of the function followed by inputs to that function * Doc-string documentation * Definition of the scope using `tf.compat.v1.name_scope` * Functions that take tensors as arguments should call `tf.convert_to_tensor` * Checking the shape and value of the inputs as necessary * Main logic of the function ### Function names Prefer function names that are concise, descriptive, and integrate well with the module name when called. For instance, the `rotate` function from the `rotation_matrix_3d` sub-module can be called using `rotation_matrix_3d.rotate`, and makes it easy for anyone to understand what is being calculated. Functions that are only meant to be local to the file in which they are written should have an underscore before their name. ### Input parameters The first arguments should be tensors, followed by Python parameters, and finally the name scope for the TensorFlow operation. ### Input shapes * The first dimensions of a tensor should represent the shape of the batch, and the last dimensions should represent the core shape of the elements used by the function. For instance, `rotation_matrix_3d.rotate` accepts rotation matrices of shape `[A1, ..., An, 3, 3]` where `[A1, ..., An]` are the optional batch dimensions, and `[3, 3]` is the shape required to capture 3D rotation matrices. * Every function must support batch dimensions of any shape, including tensors with no batch dimensions. * For input tensors with common batch shapes, document whether they can be broadcast compatible or not, and try to make them compatible when possible by, for instance, using `shape.get_broadcasted_shape` and `tf.broadcast_to`. 
### Documentation Every function must have a docstring-type documentation describing what the function is performing, its arguments, and what is returned. The input sizes must be written between backquotes with batch dimensions indexed by letters and numbers, for instance: \`[A1, ..., An, 3]\`. Here `[A1, ..., An]` are the batch dimensions, and 3 is the intrinsic dimension required for the operation (e.g. a point in 3d). Prefer to put the batch dimension first. ### Error handling Handling unexpected inputs usually consists in checking that their shapes are consistent with expectations, which can be performed with `shape.check_static`, but also checking that the content of the tensors are valid (e.g. value in a specific range, no NaNs etc.), which can be performed with utilities provided in the `asserts` module. ### Differentiability and stable gradients There are several TF operations that can turn derivatives to zero at unintended points of your functions / operations. This can be avoided by using tools provided in the util.safe_ops module. If it can not be avoided, make sure to add tests checking the Jacobians of the function at the potentially discontinuous points of the function. See [Testing Jacobians](#testing-jacobians) below. Examples of such functions include: * tf.maximum / tf.minimum(a(x), b(x)): These create piecewise functions, which means derivatives can be discontinuous or zero for some ranges or points. * tf.clip_by_value / tf.clip_by_norm: These are also piecewise functions where the actual function is replaced with a constant piece for certain points or ranges, which makes the derivative zero, even if it actually isn’t. * tf.where(cond, a, b): This is another way of creating piecewise functions. This should be used only if it is really meant to create a piecewise function. The util.safe_ops submodule contains helper functions that can resolve issues with divisions by zero, but also helpers to ensure that the data is in the appropriate range. For instance a dot product of two normalized vectors can result in values outside of [-1.0, 1.0] range due to fixed point arithmetic. This in turn may result in NaN if used with arcsin or arccos. In such cases, safe_shrink in util.safe_ops should be used rather than clipping the range, since clipping removes derivatives which should be non-zero at these points. Cases involving zero divided by zero are a bit more involved and require dedicated workarounds. ### Software compatibility The library is intended to be compatible with the latest stable TensorFlow 1 release as well as the latest nightly package for TensorFlow 2. We also aim to be compatible with a couple of versions of Python. Testing for all the above is automatically performed using [travis](https://travis-ci.org/tensorflow/graphics). ### Hardware compatibility Except for performance reasons, every function must be hardware agnostic (e.g. CPU / GPU / TPU). ### Python modules Each module must contain a \_\_init__.py file which lists all the sub-modules it contains. ## Tests Testing code is essential to make the library usable by everyone at all times. In the following, we will briefly review our policies around unit testing and code coverage. ### Unit testing * all test classes must derive from tensorflow_graphics.util.test_case.TestCase * to improve readability of the code, and minimize duplication, the parameters passed to all the test functions described below are passed using `parameterized.parameters` provided by `absl.testing`. 
#### Test files

Each module containing code has associated tests in the module's test
sub-folder. Each test sub-folder must contain an empty \_\_init__.py, and one
file per .py file in the module. For instance, if the `transformation` module
contains `quaternion.py`, the tests associated with that python file should be
located in `transformation/tests/quaternion_test.py`. In the following, we use
FN as shorthand for the name of the function to be tested. Let's now have a
look at how tests are structured and specific things to test for.

#### Structure of a test

TensorFlow Graphics follows the arrange-act-assert testing pattern. Moreover,
if multiple tests are used in a single function to test for different but
similar behavior, `self.subTest` should be used to create separate blocks.

#### Testing return values

The function names and behavior to use for testing return values are as
follows:

*   `test_FN_random` to ensure that functions return the expected result for
    any valid input.
*   `test_FN_preset` to test specific inputs, and to make sure that corner
    cases are handled appropriately.

#### Error handling

Following are the function names and behavior to use for testing that errors
are handled appropriately:

*   `test_FN_exception_raised` to test that functions return the expected
    error messages when input parameters are invalid (e.g. shape or values).
*   `test_FN_exception_not_raised` to make sure that valid arguments do not
    raise any errors.

N.B.: For both test functions above, make sure to include `None` in some of
the input shapes.

#### Testing Jacobians

Derivatives and gradients being at the core of deep learning training
algorithms, testing for the stability and correctness of gradients is core to
preventing problems, especially while training large networks. We perform
numerical differentiation to ensure the correctness and stability of the
Jacobians of any function by defining:

*   `test_FN_jacobian_random` to ensure that Jacobians are correct on the
    whole input domain.
*   `test_FN_jacobian_preset` to test the stability of Jacobians around corner
    cases, or points where the function might not be smooth / continuous.

N.B.: For both test functions above, make sure to decorate them with
`@flagsaver.flagsaver(tfg_add_asserts_to_graph=False)` to avoid potential
errors arising due to finite differentiation (e.g. a tensor not being
normalized anymore). A sketch of such a test appears at the end of this guide.

### Coverage

The GitHub mirror of TensorFlow Graphics uses
[coveralls](https://coveralls.io/) to assess the test coverage. The version of
TensorFlow Graphics that is internal to Google contains the same features
compared to what is available on GitHub, but has access to more tools for
testing. For this project, our internal policy is to only submit code for
which our internal testing tools report at least 99% coverage. This number
might seem to be a steep requirement, but given the nature of the project,
this is obtained with reasonable effort.
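As a complement to the Testing Jacobians section above, the following sketch
shows what such a test could look like, again using `triangle.area` purely for
illustration; the flag decorator disables the graph asserts that would
otherwise interfere with finite differences:

```python
import numpy as np
from absl.testing import flagsaver

from tensorflow_graphics.geometry.representation import triangle
from tensorflow_graphics.util import test_case


class TriangleJacobianTest(test_case.TestCase):

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_area_jacobian_random(self):
    # Random batched vertices; with high probability the triangles are far
    # from degenerate, where the area is smooth. The Jacobian is checked
    # with respect to v0 only in this sketch.
    v0, v1, v2 = [np.random.uniform(size=(4, 3)) for _ in range(3)]

    self.assert_jacobian_is_correct_fn(
        lambda x: triangle.area(x, v1, v2), [v0], atol=1e-5)


if __name__ == "__main__":
  test_case.main()
```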
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/nn/loss/tests/chamfer_distance_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the chamfer distance loss.""" from absl.testing import parameterized import numpy as np from tensorflow_graphics.nn.loss import chamfer_distance from tensorflow_graphics.util import test_case def _random_tensor(tensor_shape): return np.random.uniform(low=0.0, high=1.0, size=tensor_shape) def _random_tensor_shape(): tensor_size = np.random.randint(3) + 1 return np.random.randint(1, 10, size=(tensor_size)).tolist() def _random_point_sets(): space_dimensions = np.random.randint(3) + 1 batch_shape = _random_tensor_shape() point_set_a_size = np.random.randint(10) + 1 point_set_b_size = np.random.randint(10) + 1 point_set_a_init = np.random.uniform( low=-100.0, high=100.0, size=batch_shape + [point_set_a_size, space_dimensions]) point_set_b_init = np.random.uniform( low=-100.0, high=100.0, size=batch_shape + [point_set_b_size, space_dimensions]) return (point_set_a_init, point_set_b_init) class ChamferDistanceTest(test_case.TestCase): @parameterized.parameters( (((0., 0), (0, 1), (1, 0), (-1, 0)), ((0., 0), (0, 2), (0.7, 0.4), (-0.5, -0.5)), # a[0] -> b[0] (0 + \ # a[1] -> b[2] 0.7**2 + 0.6**2 + \ # a[2] -> b[2] 0.3**2 + 0.4**2 + \ # a[3] -> b[3] 0.5) / 4 + \ # b[0] -> a[0] (0 + \ # b[1] -> a[1] 1 + \ # b[2] -> a[2] 0.3**2 + 0.4**2 + \ # b[3] -> a[3] 0.5) / 4), (((0., 1, 4), (3, 4, 2)), ((2., 2, 2), (2, 3, 4), (3, 2, 2)), # a[0] -> b[1] (8 + \ # a[1] -> b[2] 4) / 2 + \ # b[0] -> a[1] (5 + \ # b[1] -> a[1] 6 + \ # b[2] -> a[1] 4) / 3), ) def test_evaluate_preset(self, point_set_a, point_set_b, expected_distance): tensor_shape = _random_tensor_shape() point_set_a = np.tile(point_set_a, tensor_shape + [1, 1]) point_set_b = np.tile(point_set_b, tensor_shape + [1, 1]) expected = np.tile(expected_distance, tensor_shape) result = chamfer_distance.evaluate(point_set_a, point_set_b) self.assertAllClose(expected, result) def test_chamfer_distance_evaluate_jacobian(self): """Tests the Jacobian of the Chamfer distance loss.""" point_set_a, point_set_b = _random_point_sets() with self.subTest(name="jacobian_wrt_point_set_a"): self.assert_jacobian_is_correct_fn( lambda x: chamfer_distance.evaluate(x, point_set_b), [point_set_a], atol=1e-5) with self.subTest(name="jacobian_wrt_point_set_b"): self.assert_jacobian_is_correct_fn( lambda x: chamfer_distance.evaluate(point_set_a, x), [point_set_b], atol=1e-5) @parameterized.parameters( ("Not all batch dimensions are broadcast-compatible.", (1, 3, 5, 3), (2, 4, 3)), ("Not all batch dimensions are broadcast-compatible.", (3, 3, 5), (2, 4, 5)), ("point_set_b must have exactly 3 dimensions in axis -1,.", (2, 4, 3), (2, 4, 2)), ("point_set_b must have exactly 2 dimensions in axis -1,.", (2, 4, 2), (2, 4, 3)), ) def test_evaluate_shape_exception_raised(self, error_msg, *shape): """Tests that the shape exception is raised.""" self.assert_exception_is_raised(chamfer_distance.evaluate, error_msg, shape) @parameterized.parameters( ((1, 5, 6, 3), (2, 5, 9, 3)), ((None, 2, 6, 2), (4, 2, None, 
4, 2)), ((3, 5, 8, 7), (3, 1, 1, 7)), ) def test_evaluate_shape_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(chamfer_distance.evaluate, shapes) if __name__ == "__main__": test_case.main()
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./.github/workflows/build.yml
# Continuous integration tests executed on push and pull request actions # see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions name: Build on: push: branches: [ master ] pull_request: branches: [ master ] jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Set up Python uses: actions/setup-python@v2 with: python-version: 3.8 - name: Install system requirements run: | sudo xargs apt-get update sudo xargs apt-get -y install < requirements.unix - name: Install pip requirements run: | python -m pip install --upgrade pip pip install -U -r requirements.txt pip install -U pytest coveralls pip install -U flake8 pip install -U setuptools wheel - name: Build ops run: | bazel build tensorflow_graphics/... --define=BASEDIR=$(pwd) --sandbox_writable_path=$(pwd) bazel clean --expunge - name: Run python tests and coverage env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} MESA_GL_VERSION_OVERRIDE: 4.5 MESA_GLSL_VERSION_OVERRIDE: 450 run: | coverage run --source tensorflow_graphics -m py.test coveralls --service=github - name: Linter run: | flake8 --config=.flake8 tensorflow_graphics/ - name: Build pip package and install run: | python setup.py sdist bdist_wheel pip install dist/*.whl - name: Test install run: | cd $(mktemp -d) && python -c 'import tensorflow_graphics as tfg'
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/geometry/representation/triangle.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TensorFlow triangle utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def normal(v0, v1, v2, clockwise=False, normalize=True, name="triangle_normal"): """Computes face normals (triangles). Note: In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: v0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the first vertex of a triangle. v1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the second vertex of a triangle. v2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the third vertex of a triangle. clockwise: Winding order to determine front-facing triangles. normalize: A `bool` indicating whether output normals should be normalized by the function. name: A name for this op. Defaults to "triangle_normal". Returns: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized vector. Raises: ValueError: If the shape of `v0`, `v1`, or `v2` is not supported. """ with tf.name_scope(name): v0 = tf.convert_to_tensor(value=v0) v1 = tf.convert_to_tensor(value=v1) v2 = tf.convert_to_tensor(value=v2) shape.check_static(tensor=v0, tensor_name="v0", has_dim_equals=(-1, 3)) shape.check_static(tensor=v1, tensor_name="v1", has_dim_equals=(-1, 3)) shape.check_static(tensor=v2, tensor_name="v2", has_dim_equals=(-1, 3)) shape.compare_batch_dimensions( tensors=(v0, v1, v2), last_axes=-2, broadcast_compatible=True) normal_vector = vector.cross(v1 - v0, v2 - v0, axis=-1) normal_vector = asserts.assert_nonzero_norm(normal_vector) if not clockwise: normal_vector *= -1.0 if normalize: return tf.nn.l2_normalize(normal_vector, axis=-1) return normal_vector def area(v0, v1, v2, name="triangle_area"): """Computes triangle areas. Note: Computed triangle area = 0.5 * | e1 x e2 | where e1 and e2 are edges of the triangle. A degenerate triangle will return 0 area, whereas the normal for a degenerate triangle is not defined. In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: v0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the first vertex of a triangle. v1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the second vertex of a triangle. v2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the third vertex of a triangle. name: A name for this op. Defaults to "triangle_area". Returns: A tensor of shape `[A1, ..., An, 1]`, where the last dimension represents the triangle area. 
""" with tf.name_scope(name): v0 = tf.convert_to_tensor(value=v0) v1 = tf.convert_to_tensor(value=v1) v2 = tf.convert_to_tensor(value=v2) shape.check_static(tensor=v0, tensor_name="v0", has_dim_equals=(-1, 3)) shape.check_static(tensor=v1, tensor_name="v1", has_dim_equals=(-1, 3)) shape.check_static(tensor=v2, tensor_name="v2", has_dim_equals=(-1, 3)) shape.compare_batch_dimensions( tensors=(v0, v1, v2), last_axes=-2, broadcast_compatible=True) normals = vector.cross(v1 - v0, v2 - v0, axis=-1) return 0.5 * tf.linalg.norm(tensor=normals, axis=-1, keepdims=True) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/projects/local_implicit_grid/core/implicit_nets.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Implementations of various implicit function network architectures. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf layers = tf.keras.layers class ImNet(layers.Layer): """ImNet layer Keras implementation. """ def __init__(self, dim=3, in_features=128, out_features=1, num_filters=128, activation=tf.nn.leaky_relu, name='im_net'): """Initialization. Args: dim: int, dimension of input points. in_features: int, length of input features (i.e., latent code). out_features: number of output features. num_filters: int, width of the second to last layer. activation: tf activation op. name: str, name of the layer. """ super(ImNet, self).__init__(name=name) self.dim = dim self.in_features = in_features self.dimz = dim + in_features self.out_features = out_features self.num_filters = num_filters self.activ = activation self.fc0 = layers.Dense(num_filters*16, name='dense_1') self.fc1 = layers.Dense(num_filters*8, name='dense_2') self.fc2 = layers.Dense(num_filters*4, name='dense_3') self.fc3 = layers.Dense(num_filters*2, name='dense_4') self.fc4 = layers.Dense(num_filters*1, name='dense_5') self.fc5 = layers.Dense(out_features, name='dense_6') self.fc = [self.fc0, self.fc1, self.fc2, self.fc3, self.fc4, self.fc5] def call(self, x, training=False): """Forward method. Args: x: `[batch_size, dim+in_features]` tensor, inputs to decode. training: bool, flag indicating training phase. Returns: x_: output through this layer. """ x_ = x for dense in self.fc[:4]: x_ = self.activ(dense(x_)) x_ = tf.concat([x_, x], axis=-1) x_ = self.activ(self.fc4(x_)) x_ = self.fc5(x_) return x_
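A small, hedged usage sketch for the layer above, assuming the module import path matches the file location; in TF1 graph mode the call builds ops, while under eager execution it returns values directly:

```python
import numpy as np
import tensorflow.compat.v1 as tf

from tensorflow_graphics.projects.local_implicit_grid.core import implicit_nets

# Decode implicit function values for a batch of 4 query points, each
# concatenated with its 128-dimensional latent code.
net = implicit_nets.ImNet(dim=3, in_features=128, out_features=1)
x = tf.constant(np.random.rand(4, 3 + 128).astype(np.float32))
values = net(x, training=False)  # Tensor of shape [4, 1].
```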
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/opensource_only.files
tensorflow_graphics/rendering/opengl/BUILD
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/rendering/kernels/rasterize_triangles_impl.h
/* Copyright 2020 The TensorFlow Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_KERNELS_RASTERIZE_TRIANGLES_IMPL_H_ #define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_KERNELS_RASTERIZE_TRIANGLES_IMPL_H_ #include "absl/base/integral_types.h" // Determines the mode for face culling. Analogous to OpenGL's glCullFace // parameters. enum class FaceCullingMode { kNone = 0, kBack, kFront }; // Computes the triangle id, barycentric coordinates, and z-buffer at each pixel // in the image. // // vertices: A flattened 2D array with 4*vertex_count elements. // Each contiguous quadruplet is the XYZW location of the vertex with that // quadruplet's id. The coordinates are assumed to be OpenGL-style clip-space // (i.e., post-projection, pre-divide), where X points right, Y points up, // Z points away. // triangles: A flattened 2D array with 3*triangle_count elements. // Each contiguous triplet is the three vertex ids indexing into vertices // describing one triangle with clockwise winding. // triangle_count: The number of triangles stored in the array triangles. // num_layers: Number of surface layers to store at each pixel, essentially // depth-peeling (https://en.wikipedia.org/wiki/Depth_peeling). // face_culling_mode: mode for culling back-facing triangles, front-facing // triangles, or none. // triangle_ids: A flattened 2D array with num_layers*image_height*image_width // elements. At return, each pixel contains a triangle id in the range // [0, triangle_count). The id value is also 0 if there is no triangle // at the pixel. The barycentric_coordinates must be checked to // distinguish the two cases. // z_buffer: A flattened 2D array with num_layers*image_height*image_width // elements. At return, contains the normalized device Z coordinates of the // rendered triangles. // barycentric_coordinates: A flattened 3D array with // num_layers*image_height*image_width*3 elements. At return, contains the // triplet of barycentric coordinates at each pixel in the same vertex // ordering as triangles. If no triangle is present, all coordinates are 0. // May be nullptr if barycentric coordinates are not desired. void RasterizeTrianglesImpl(const float* vertices, const int32* triangles, int32 triangle_count, int32 image_width, int32 image_height, int32 num_layers, FaceCullingMode face_culling_mode, int32* triangle_ids, float* z_buffer, float* barycentric_coordinates); #endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_KERNELS_RASTERIZE_TRIANGLES_IMPL_H_
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/rendering/light/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Light module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_graphics.rendering.light import point_light from tensorflow_graphics.util import export_api as _export_api # API contains submodules of tensorflow_graphics.rendering.light. __all__ = _export_api.get_modules()
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/rendering/opengl/egl_offscreen_context.h
/* Copyright 2020 The TensorFlow Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_OFFSCREEN_CONTEXT_H_ #define THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_OFFSCREEN_CONTEXT_H_ #include <EGL/egl.h> #include <memory> #include "tensorflow/core/lib/core/status.h" // EGL is an interface between OpenGL ES and the windowing system of the native // platform. The following class provides functionality to manage EGL // off-screen contexts. class EGLOffscreenContext { public: ~EGLOffscreenContext(); // Creates an EGL display, pixel buffer surface, and context that can be used // for rendering. These objects are created with default parameters. // // Arguments: // * egl_offscreen_context: if the method is successful, this object holds a // valid offscreen context. // // Returns: // A tensorflow::Status object storing tensorflow::Status::OK() on success, // and an object of type tensorflow::errors otherwise. static tensorflow::Status Create( std::unique_ptr<EGLOffscreenContext>* egl_offscreen_context); // Creates an EGL display, pixel buffer surface, and context that can be used // for rendering. // // Arguments: // * pixel_buffer_width: width of the pixel buffer surface. // * pixel_buffer_height: height of the pixel buffer surface. // * configuration_attributes: attributes used to build frame buffer // configurations. // * context_attributes: attributes used to create the EGL context. // * rendering_api: defines the rendering API for the current thread. The // available APIs are EGL_OPENGL_API, EGL_OPENGL_ES_API, and // EGL_OPENVG_API. // * egl_offscreen_context: if the method is successful, this object holds a // valid offscreen context. // // Returns: // A tensorflow::Status object storing tensorflow::Status::OK() on success, // and an object of type tensorflow::errors otherwise. static tensorflow::Status Create( const int pixel_buffer_width, const int pixel_buffer_height, const EGLenum rendering_api, const EGLint* configuration_attributes, const EGLint* context_attributes, std::unique_ptr<EGLOffscreenContext>* egl_offscreen_context); // Binds the EGL context to the current rendering thread and to the pixel // buffer surface. Note that this context must not be current in any other // thread. // // Returns: // A tensorflow::Status object storing tensorflow::Status::OK() on success, // and an object of type tensorflow::errors otherwise. tensorflow::Status MakeCurrent() const; // Un-binds the current EGL rendering context from the current rendering // thread and from the pixel buffer surface. // // Returns: // A tensorflow::Status object storing tensorflow::Status::OK() on success, // and an object of type tensorflow::errors otherwise. 
tensorflow::Status Release(); private: EGLOffscreenContext() = delete; EGLOffscreenContext(EGLContext context, EGLDisplay display, EGLSurface pixel_buffer_surface); EGLOffscreenContext(const EGLOffscreenContext&) = delete; EGLOffscreenContext(EGLOffscreenContext&&) = delete; EGLOffscreenContext& operator=(const EGLOffscreenContext&) = delete; EGLOffscreenContext& operator=(EGLOffscreenContext&&) = delete; tensorflow::Status Destroy(); EGLContext context_; EGLDisplay display_; EGLSurface pixel_buffer_surface_; }; #endif // THIRD_PARTY_PY_TENSORFLOW_GRAPHICS_RENDERING_OPENGL_EGL_OFFSCREEN_CONTEXT_H_
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./tensorflow_graphics/nn/layer/tests/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
-1
tensorflow/graphics
489
Enforce `Framebuffer` to accept only tensors with single batch dimension.
Enforce `Framebuffer` to accept only tensors with single batch dimension.
copybara-service[bot]
"2021-02-03T21:06:22Z"
"2021-02-12T23:59:48Z"
3a4f1952ed967fb884dc031eeda6dac3fbefbe52
b7a2bf260d6fcf924fddcbb6dba36c72ece66990
Enforce `Framebuffer` to accept only tensors with single batch dimension.. Enforce `Framebuffer` to accept only tensors with single batch dimension.
./.git/objects/pack/pack-ceb7c92f2b0f5403ca04c0b8128e51d67b830874.idx
gL{ Abɉz\1:`BABR* apZ4A5c=BRS-,|NAޡ0k p S-ĔApY˹[^R'ǹ{AJ5@1KVnB"4*6* xhUXBMMaH>4R'B% B.G5p3X-BZ^Qof 6UxB|f+!א>j9J#B^XSK749WB- *4YA~\G7BH3_ jM؄BT3YOzr~0R>9{BZPw#"I:'HB^@8#lst?s13BaZ@޴b QLBf@R{[h M}bBg,PSԓ31L/lByb>w6bflcB}Rc9nS-)BB0ixVqBa:j!pRQc!ߧ_B@a+ BETB!">dB[Hʬ`BcUf}6a,GZB=^X]j-xnLBpd!f! Bw@}q˯rV}!6Bt@\Q cC-Bd*\3}tB8|j̚&fɏ"BA!6?D,B1xm!*1B{£M뮒3q"BPj.B) r)B B㑭,gpjv.B}*|uo~Ry*98B C%t7/{M @C ]GP*k<|+'jCb l7Cj8Ćc+zC#bR{u&p#s"^cC$J<#hg/f!V^C%A#b BbC.,T `RXįC0 Ϸ;NC@Wӵ,=ϙ3CGGx NˬCH( #_ҮCIԬQ.T݁"L'CQpPҟVoŷqyN] C`A.S3 Cc0iI:֟glmCfOseeMCt"bFNC{B-&+"ˤCg"esId[C2mU Y#sn CQhW wгFR`CŅ[ҿ"h% Rɲ wCMI;>VC.灯%ac%3'C6YF0ap4:C]%dPJ>\B ,8C{*|Gk(.CK}ymP/tD"ꯇA3#q0{U'D$aAo;YDF[8 (gDK|9J2VmLʄD L BZU kDfzӱ{KL:D# 9DtLis-%4 SD&0B8OekSUDB9@&̽t0Se$ DDw60:fHDH ^Q[ahԿDQ*ՎStJblD]NqKU9X\:VRDdU]hpD0&DeXײWhUkD ED+vgDk.W0f뜫D@al'()BLDM9/ԥ /=*ZҀDZ<[ bnlۺjxD ^̌J"6hW9DJ<I$R}2D}e$!Cc_wJD ?A;eٖD7vzSnA+wDkc+ߨdm%=ND,M8ѥ ABXDʇJɮW00D-AGDO?'ˇI>Y`D/"̷ǍRDh6"HW,D  /lAETDF>ɛVDG; [E(c|z%Fͳ@E2N:سe=v/s6En{Dܕ&DBnEoNzdzl6vciXEqS(5ڛQ#\iPzE>djGY{S*TEiG =]-uEVx5\1I3)ǞE;}XёTF\WE܂?ſ+XE~$*.̭lKcEdEy KEoP0&tBܪ,^PE;R hisUBcF 8\mcz@ .BBF% f4.wdLE]RFmp^y<}o{(,͋F!Nv"&}ànF!i^$D*g/yF(LW@)'hD^F3H(mW.F:/*^Lp^F>tN}č%}F?x*U 9ڭ;0jF?'#r/ d-`lFFJRuQ_"^[FI\xH8~6BIp.lFQԪ ݜ{YnFRɱqPPMضѭFZLmaqV<<şˍF^|eBo=g"kXFc}֙ԃE55aFl Oq;ASC\PXFm|Ȧ Ī~EaF}D-7SYa{F1fc)Ze~yF'|:PH8´>P}F. qPE1W}FM6por3;[KCQF$Q 2,PiOF.]y;Q#wFgx`Դ:rF{C9F` ث9 '=>Y_FmbFkB|rx CFȀ/+? ~]LFR:x!]Fԧ4e!Fߠ˩c3AF@'^La{0FΨ<FܱalAXa@5﯃X)F$ )؇%!9 Fm2 ̓ @rF FiO%uGN4{fR" 8tG=4CF(n,L.G8G8 v\lGVڷaɂ%vlGFr2}Gq/nƔ-ÝIaG({ˈ%gRw G,&3vG1iHV[F"U"G7 ޶0K%iiv1G>hs,kDOKm5GYh"t^&b<P9K2 G]dDoQhGi[ K`{=$Gl*F$GrbZus?Gw_EI@seGBco[R7o"Gul|h$gíG<M+RMƶ<G|H c<hсG'{QMZ&G9G)GNs@uV XZG8i VE9{G"S*! !a1.-Gq;lW)7's _"AG3M[,`Yc!G*5zBGi6g:=PkG$z{-0H"neL-NH:`p)VXrH糖9==c{<zH\8m S;NH%^FCi]6uKKHg XQLwH^F">| |@H  J3}%Vj:H |Ky$>< +YHh̗[?Zs(A氎H kuҭH}meH%0fJ! iH5lݞ5lޤvS=H7ձΦ1pH@HDK kNMWY$HEK—䷵L Xd.HY %%#B#GHi #5kDAH{x2+%dCធJ\H z C ǫlHqx<?,xQ^*H:ϟ,D}H@G zCuH/ !^YHIoK"&Lѿ99uHmJmɏ]H񙑮$ |I=/܈SIgpT+qIBId4~GuvjI&*t]`/䇼I/i Dg7I cP'PxK3]I)&#Y$s8wI0KޤuI0EKI6ōi--~o%tII>\B&~% `hIKW4ZkIH`RIT-ǒή+tIUP7+@vSݮ9I` q6*]$iI`Iue>"=EZIbnXډT9.0BIhH:f~ܠ-}Ij(C>-z?(Dvv0Is`]nҚ$ ZӦIt#ͧlqP:Iv !d]hkۂlIMR{QjhYILؤE1IHLwvC4y=I6hsc8T>IMޝyny} ^I?Q.W3O.I,,6MTIO &FA?=IKf1îO{db`ISfVb[ JPI*lcB,YpPI % =1}r"N'c8II.謜MꐼWgIÀ16jkQMYIx pVZvJV JN J#ORφӯ>tJ)+f1ڲ(bjJ1 nJ7dШFyJ8ZSq{ CJRG:xœ[ho_ɖJRwd4gB6郝JRJiE:7FJWB嚢DaJWhNNf8hHмJ]R))A"J`dvw_cڰJb fd)4S܃e?Jc}1-2mQ? [Je}Jp^!gI JgޏI~j'XFvrJh:N }$F1Jh喏Tle'3mCq89JqYψ\O)XVJs.\8ڐk;TjMvJyUDOsb}iXJQT*wCuX:<DJn8~)nRmJЦ1dJ vy<^J̡Es;X}>JUx#t-[·G Jj#@COJ'cMn!'0aGJ(v/cB:}?EPJмȮQ})p̡%HJ?{-LHw,Jbbkdm"'׿Ju@bv\(`JaJvBI5UO JE FjbJHf&7uBwSOO6J|YAĒ=jKp q0 @%lr@K iPۆ0V݋cB}aKE z?] Pt0KN*DM5T|K(5U?y*C|@plK8kpxL?y3K?J6+QNr]bK@kO,V*O|KAW=1̫ )lwY9KV[cޕuҦ<NvKX_Qͺ6!']v*eKq^uA'ɔKyEdV hyVmXKae$HcuK!eMŦ9=Kn;+yGKQ9KBR9J|GKpmyiI)NKp"3Jdf(K]X Yg1vDSK9xrZici kMxKvt0q<+HUK~nze;4?Kv,C/"‡K/Tj: DŽ<KNʏ6[@KJ,gл,_ZNXKM/d>SV'KGAC զK:f_|ݎK$ߦ&ؑvBd9mKn*VGnL|pn161 ZaL{h:b`@ם-L#rݎ<$I%L(qvihpL8y*KP7Cj!@L9D.;b=LN4g3fWzhLQd8+͘T)&*OL^^*˹w|4+QjL_%Hnl>ӍTLrPt*L;TOALu Q/g,}uLy[/2LrrdL ώfʣTL ƒ h#Y,LiYc&&ӪL{U>wp'[@'L_L32E4VyԘL6V}=#T{ͻL!ʇ(IgiLaN>#LgmL_jq1:ÐLC \ +VmϲLz5tDT;9 `LQ H=΅氫M"JJR7 qM6GbwTU DZY,M/甹xY ;I>nJM;ŘVlҗ:h}ML6r--H7O=MU-,3˰!fsM^G݈HUPܾ>Ms"wQ2w,-MyZnLдhL_==Z 4MsPzU{~M#W\6$EMUW ϪmoM|/`OHLŭM@] %O"OMc.W?wY Mb,pk&pb[jM$<DKc;Gh:sMuU(fvMƍ b33bʊO0M<G h. 
\Ґ)M YZ<6<+&Zg3Mށ*"lcA=y\M.U@B񍘈MfM~UR`M*;J)Vi[Xo My;Yɱ64RMq&Ah+, 4]M*eqʰIti UufNX<-|lG;&N$i`9 N1Q;Tqt#1N9.LFz m~CĺNKy O<PDy`NlhU990hy =t7rzNo6dqm,+HNݷT s>$a>NQM!5zFZ#Ns\ս_N7rNLzLN49m'ر~mN"ߜ|l1N{fY@!{U NÖ_ /v "܅N*&BU]gqN>;ZK5-1?N֬30cT8@#N]9''$ON֫o,#=Ug#_NnŹa m@=1[N/DzZN\_NQr`7_O VeÉp6BXTgO ݗҎC[ unWO<_Rp;O iXEf/Vagd\~BO"rM 9iIv)O/Zn"x%G!O7A ;j@9KEb$O9_]&=~WujO9ytm7r(O=xE\YZayYFݘEO>"# cJO#1$OS Izu*EHO_VZ0QmrZDAOba $UFXԵiOl&0#7OAzO3%ݳO9w?]56O`9Ӥac.V]D'tܑO@zI"?1OZ\^_V3O0?™a<,մ%O#n_뾀I7gO"%O 0igNN`/GO.L랅5&9:~u(h(Of"S+~c0]OL=S9W07'G<O[\4o#IO\âx0Au,(Pwq+x׶cLdž0\PsĬ&eRy(P!T)Uݧ|P",qGd $kP%9ʐ!:9ccje*P(0ʏK1&w]P6ߴ |,cy2P7/wP.f5R"zPM{}D-0θsPzI`)%/=dnP_Pth۔vJgd;)}kPi}3uLFbz>-D\LPNp6/_TxP~l4)q@$_.JP{6D i6WVPC^4 23 `/6P)vTIbOM/Phb$ yU+P'vTv۾zPz>V\'=J7P/|;*0_;_lLIP)d1{twלP2'~㙠R>QοNb?'nxM8zsQԤgA;8՞^I^BQQqσ6MtlQUa+e27j5PQ3b%I vE k*QJ 4%ou%meQY d>mvv%zgQn7\k+zsn:TL$ǁQ|8/phZ)Ǜ Q,|N1",߽rQFi(?ǟ`D;,QC 7:QR8@I DetQm$ jDsf'Q"A~suo۝IA"`QPO=$K9QaZ;*'{QN…N@BZN'X |Q* }?vADQ«5)yOw"3QFCYg΢ KOG֋Q, x%Ro.)R?yo%R (aC{`i5RD#9WIR ˃_n]jx S;R$Sc͕ЄkR/о7n|9gJbR1$ؐBGJR@Z3/ \[n'c 2RCT뙫t,ٶ<3RKSғ&T\'fZRXf~2..1܈Rs[ȞZg bR~JLHI}?^j{R ]h-8~Dv&RS,Wz0GTqRZWAV=Ruhs<R ,|*KR~#{adwe}NRӋC"H>yemRٹrmPc RZY6Q+nRU $ $OR䃺[Dk+ڎQuRj*dtsђބBSl1XdSSԶ!JMlS%22 ~-ѨnS'; %CƕS-B)w®LS4|W {}nU[hS7C9`eŒ]\ S87!Qr]vr S9LV,L* Z-wSHW^@`zkJoZ38S`%wõm q3SbB |DLbȠ lSz`1bx'Y˅S}ZE=Vy-/HS~M7󋓟 ZdSvBP֫#vReS`/A/lshtTS&\ەvhQAџUSirT8pSwskήC؅r§CSft?@t yzSɷL"K?Rb6OyS=2&yJ 2iϋS_zB1k'?CwS̨̈k }K!xU?tS-(8V|La35SyVaDu&\/]`k`SoatjN`sTS;&#=pbLu,Si҅j6| ?T̜'ׁzTTOe ?X H8 /8TL&)ύ|_"T:UK5DdPX(e|T9imt6AT$c[C}qF\T+$0G6EMOIa(T6[ԑMM$mAT9_\;B&iV0`T>6ɴ_)7 ]fTKF%Mdu}TTO.ڹY:TRW5xj_a~}T\f]XM".4.IT`cxʝ)K2Y|Ta?;IBEXk}Toᔱ#ZYT`P 7W6Ti#2ېah̙T'I_|˲\Y (Tb>x7[u)KTAMhM[T`NxݴuNTvQJBkcTp$ czyrsTa:kX{:T*Tj~O3M*+Q'Ta'7].3k7̦T؀^O֭6}I{ɷ^Tg@aYg*Q̪T >bL(KWTrěH+VjT/+z UVZLt_F\Hڧ1U,B!^@aAU/M>_7} U7;߽0^6 U? %cMzILBoLUGZOl^w87UM(P\nQ+^:UNXCneTLPfMUpI@IO!@U|0UnlFG3T}U~]1úݧ aUAw6XNMU;i U$ w7U z Y#QHDUkѣ0"{mjѮU2o-NG%CNUƕLy`ݳ3N,uK`աUNyY l_IUԎ*5 "caQhXinU0{|_$e3TbcDU8%j-Ȟ&2V9)lPfiDLV !kbr=c!{3V{RR1cVaЂKW%VuzQJV$/◁U5,0V6JYadfcvEt'_VGڇ+'[VOUis1I)VO /5li3rNbVX*kx֐h MLX<VlN,za+$ VpϹ鵟 &\%ėP V?[l\џbDVZQ}YC5'Ϙ޷V}h Lϼ πjV}cTjke Vk ]}p8HH-6V ;eCVDr)fzW 5_VϔЩW/Ԉ&'W+D5>°2W/A)Y&3WHD<d?t9WI%n]"[V-0=iWOhϕa3qUWI]vWY߹Mpu[Wk)Ly!= Wn_(N@~vWo>"fhv;WtmE%1GnWvBm 6wǣWyoE@ߎ2aCP[WuFx&]%2>W/Gd j!E N2Wk2FFw{Υ W䡸sM уWW٥HqEo!bW02zV1 /c. 뵫WJeoF/^{^>{WW ].!5pDpW٤!@MjngGW٫.= KDe0Cw"WF fGo+6W`x׬ B` !|W^.⟐Jw<W^$,h VW񌀱ۮУ"60Ju:WLЦg8ߏ+UXĨN ZXąt>sX ^r x wQrX ?f@> _kjB2X00= xw4X3(Nw+o?һXss_K8vM^ʼXj@MLVIX-V|AY52`aɦX8#$1sMgxJ-X;٠+ό}i{X<΋we3ǒ` Z+XD{$^y|w9ވmXX"H??4ﳬ0lXZ k͇bXj[:ѱ5 xc+ƙXr['\zMMqI[Xt7Kr/8;ͶvX m}ZX%R9u\+X;U0G8AG,+XNxB-BXĩTz&i|Wi5X<LX8m.ه!X%72XD|yaXS!Qz-XC]!1sq3w)!lX(b?tj]Xē^PW=2&U}~X es$a96&YΎq1*Y#]6C͔MY#y6 \8.6Y)lac&-.'\zޛyYAR;r"YGq: 3ǖ4/6iYP8nuU Sry}w2YRO73 t%X["YU*;[흧Y]~83Yh%|,!Idr,yYo ͕O9<lYuk $ HǴȷ`Y?iٚ_d@3?y{Y0q*6׬JYErBfSYխ/ӸFH!uY[~uK;jgPY瑱[q֥P0uYYQp#d֣ C6߶}Y ~PUhV0Y<M^`^iHYŨYD7Eߟi(Yۆ »'%U>2БY2oMa!Y]o[.TR:YE6Y'Cg'1bKNIQY=aBHz=Ͼ6tZ la7OM0jJZ Q<br+oZ If`xZ!;H6R[Z׳SgX$Z-+cZ2ry~iDaZ"/uK>(5! Z.dgY ɗ5Z279|~74!Z6C9b vxSj;Z7xR-**RZ9G)!Z<s=p 9fZLˏ(E8ʧZP]fb&P; ZXNB D0l:-ZY8m$v!juRfd$sZ]\!u#YGZf1{ S95oBlj@ZgxC|vRQuFA/&Z~=* :NC?w:]gZ&tB翗헏nZ\'PV#T*$Z)@>ΰ$/ZEex83%pu<Z.1a$1usZ]+TDF6 Z|k.lMy=3ԝ-ʮZ*p. 
ؑ ZxZk%99.m&iZ4D'o8L%fYFZ(~C_]z,GbZZȡ/P-فחZY~Pn"0aZaХWmZjmxx0VM`P[N95k M\+_[-87,Mcm*Xl[i->g#9r[/"cP IKn[d` )@4Du[˪6Q{Wg[!G% RP-m®Ua5F[,ڇ\a2 [4DYlϒp5B[8Ӣ=!157dMP:[BmE`^[K$۰m^,H_V[WG%S â F[h{v>atig[n>jfZ7?[o/Ka8tV.dI[=weFƚ)Ee&[yXH=0[m nO[+ptljJړl[jmf=)[[f^HKO[\⸠4 ׉[[$I7ݥ]:Nӆ [Ł(SS"țq[ɱxi oE![kXn M!K^[܋D[&>'ߧeP[ښHS [$D`p ,\"zj/\^[˩w5%\3m6ū4BJ\?ĊX8 w\A?H-$Xf\h..kվ\ %z[ # BcA\/RQ*O|Yh;^_\8/K`t_m-H]?S\aw4TEƮ`x\pe_`֣<|*\qǰ*<V|dǼ))\sgJ:.[\jё|@{\;BX‡kc\oK <Z̼7\̺%8^ׅt>\LP*yP6zu\["(*MT\R`)BiZA~\%)qM}k\MDJmJQIM\#B98dXaa\2wѨij}=\ʢCaFr')\v%zRzI_v\Κ8@+ a<[\LPH~;婑R,\3xWsN|\ߘ( 0s^Nq'\sR$ 8ˎ\`v<0?JT9]2š8/--8G]eml\ǻW<kH=]Z?,(f!](7t5Fzd8W'Ҟ]5*DQvc]B +˔s ٭]JD#- ^,?`a>]K5!hYzDF&[~֔c]VM!3,7Əژo]ds*s_R o]g" P7~/C ]uLvV(+:fC!]x7p&WA˚D]62 u1Nim+se]Z4DWM/} ]e\)^d㘵:.xz]-><΂;]d|B*/Eͫf2]% ͈rX{H|]lXq/hG]HiѴ`Y'ʧ,])e'|xX]"<h<q'J]=_ЅRt@ם]%j _u"]i<I׺ ),>5Bj]c~64{09?]uS'e*uu]3~V8xiݲF8-]}I%q^m]yC[q|]t !}xNU]ݣ4F p6(-mL]Ё}`xtM]g'ٝGs^,{{qF͜~#^>{ag^@HO0B[ul^V W6>ȯѾ ^"-[z$,RQ7^*{7)Y.q^4"5>j=^;/ 3!Y}묐nВ^U::D K%^\nGlTpt^p :>`:7F^svNs1?Cs^z@ˑN£}*KY^s?F3-"P^Qn8;3޸ڧ{4^4\^M_ O؆c^.(xD I \1|^tLk`u]1a^^G5ns!3.+i-D^E? )#. ^# 3pSO^;3K9TL!veN^X(><WB(^I7Z$}7ԍ9^GX>cwQClđ^+}+ v]^oJg 3L2 c_fh۾cH\Q_ ;=.1CA_ĶWNl0_7i-㕤+x_"bWěe{X_&%c?IizV_,xיM 6_=Yvn8s ._A|%wYvΰЛ_Z7~>#|&ܾ__-t3 :w*:U1*p_r#E,ЪB e_yd&3@4j_T>q7L)Qqn-^_1_·=~$P__ߓʅ Q__uﱇ&R 5`5_y;Y궀_w3>V]ro<_մ 1z_w'FzHHWg_Ԛ_{CGfuR`_D4OM5dמ<_Пc;q;2N@_:Tkȃq\_^*6a9jZ_Foc1~*ԺP`J'25e:<@M`[_WG@>6`# rz@ʴp|¨ӳs`.fKt7jgXe~*`0C,}`;gA8`8|0oPӕ̌`< a>Zm`CFnn@*;`E85K ŷ`J|Oca`b`Kf8h@ju*d`ZIpd78M)6Aʙr`b7yƮxr֗,k`m)X, UM*o`m҇x䕮PbΔ`oQ45Vr" \`s~XʔP-x#`xw^7#SMΞ`yơ4H|#CN*2f`{ >>H{HXY*`9 tc]Ebe8zGG`ì  w`Hbx_etM`G|y?ء$[`XIMVm`#>A<]wp@K`֬I8?b$ o`9bc\oħʘ`\; f`YPF@BïI0a7tGAza2]mE"a=泞~P(af]8an';=aQ!Ur{&fC]Ba%יRb:?\RNza5oIƒ"6:xa7P4dRMYaM?\aNDQr]ʈI5|*aS5MF"FXl#k{~a%s2aS."Pa D j {4ӫa- g*Za{A[+{nEGla.L vuawc3Ro~UO;>a]dDmhvla'&O\-{FajPJD57S%=aN'+V`7am6uUZI%a3wLgTAPҰ"dڡaZӮ1Kڪa}T QiZ/a9_([KZ Fa; 8Vx{˯,jauQ&U4ަbZ(ڻR5urqbBaT0˱bM?)b}Pd.WH@.b OĒYc Qb-X1GaG+٪b110z6܆|n+b@ `%(=s'K>$:b@-د6cbECY;wܞ3!bUv2fڔ-IDbYS0rOQ#EbZ2H u+Y8rlkb`PĻ-,T,dzSbn4bdC[bvt=4ɂk$>ܭbw/AAZu3։1#bbg0C|.bdd$SӁȃbla?zxgxb2+ۭhr}0Wb^'`{@~Ŭᎋb 3c"i喔b BO:bpAo由bý|jCmZ z b ={DrH 8tb`W[ĥFj0]boz[Q~Mb%ls@ٿCnFb>GuO, 0c4 5cs 3c Fud_Pxc3?ʼnOj>ل)&cm!%.  ch66mWc- Oժ[xZkc7d8A8:[c8MkȔA3qc:J 20c;af_.o=F@S+cJv ]@hIcMI_ ? 
cRGqkg0ەc_&f,1LclΈ-t{rbcl~gTWVG{c}`i{Ě)y BdJc ?q!v}c\2Pae.|ɳB7cٮ$g$+Ħlc֊/74 j>cLN[iqc"|YEXϖ Mcl < ӻhcp/_bKUcoV3W'*cJ;j{K”TŠ3cmkEI3cÍPq_^plUj6cNjB?:Mcw;u$eicmPrF}߲cFr #޽gEfd ?l'R2<KrdֿpM^| d2܃5X;ۑ?d<zvddHjOi9OɨSevk]dIԔ,^'dJP}δ/אqKP%dJ)Itc@2vdY9 |]C+ayۏ`d[~{ݖ$VdqyNlLQXu6d@ S|Ba$dC"__m@I%gd P70%yV}dX  nN&Ƹ@fJRdVKJ@~ zd[ONTL.HdW15C3midQ{(|-;Pdٺarwjf3^KdJ_~NCbBd-b ‡@e9a[wE8iSe#eyi6?qHR`XOeD@|Uq}eK>t ަ0{qehIL9LϨabe"CB8JMS8Ye,덴q8&*>rWYe265W]>$r_e:<,S:PbI.ynGeS,mpf@H (NeSB:,x ǭehL%#K$iMW4uem։jU/wIjes@ .%|USEUewmղΟjeeyU Fg}@:ư ez$]eҥr:~]|Jxe ;0Ky+kj#cɻe>H\!#TZ]e!V>xjԏae# ~<B+1e;yTKbgs?emZiL"&4tKe.v.JBE׉e’*|' ݩ-,PeØʋO\j"e#0Ԉ$:/`e#xtp F%xer]'rEL eWkB\mÉ )*8e>MD*PHbcL+Le=1 YoSi0f)db<20]b!f02s4<,;-kf9<S̪Yͩ;fwu6N.RKH[f r}#KYmf5=+,j_Qf78MPT\<Ѕס1fEFb]ćN/@fH fVfM* qfLG krU`6/OfM*8P 2WF+fZ}ӃRuyYt@Vf[˖MSym]{fh;wk`W$_%efr|at h(!k.If|}36 c^̦f}T 25f jm#GfdSw~6{?f8vϕ=%'ѝ'OfǠ ~ځYb}VTfR*IL.{H ftʩ37%D~9f:Quln߈kLm20fX´ȫ:E;f[1_ 5o}f;Zjy@q,f.餄4FZ>fڥڜtSܹ4fڹ Zimfx?o,k{nf-@E V[%fqvCzJenufX۵ZaY);fB9HA|)fO= <Md!gڂS+VIEfg*N8N{{'~g.$ofHPtkgm40%(F؝ލ9g8"2w\4UR賿gxbz#἟j䕐v|#g)8(ٹLNYEFig+}~g6)9;0βv֘(g]3lG)hOZg]`t|sBx:1TgcUuwQVUtQh/gw<gje*Wmgl"_]eBu-A:ngpyBXgf/gz1>࿛Sf5cygbѤv"GZ{ge7ڛNgoL1SYpg(_$EpUیQg':d/gW|(ЭݥgrdSig8/kO:'<gmC38sk||gtYVIa<fah1~_H*)Y⭳h,3Pr.վk9;hxR0;Ɋ/*{FWhYdx+Su8T6fhVw 30G[h$Ujqom֮ h cuݏDZh#ٽ@EXejvκjr.Gh&@>,C(Yh+7GV-5:6h.Y=j33h6v]Q `b"/$h8}cݒ},veXzJh> *q$՗K3h]anVbƃvhfv4,Ƕ9h:DGqISLch)hoJkձC/#jhF0(rRآ-Yh<> $%vA.hǗh I7v>̵={v1hDdQjMj̤ļDIhZ&^H Tqh,K˧+;-hg mmJ얹Ȉho>@_֜SD,ՇhaN}8'@uhmӠzUBiܿҞXr:}i:cT+2 k^i)k fxo aLi* W@DV/0i%_k | AKIJ i(-90YѰگi-p:U:o[qi1?:x_Ֆqi2ܠ&Yy[K\i8|n<Jo ORi;E9)GiS<U"tiBK ̡;#wiS؅vU`iT eWw:{u%iYA@0KiY }λWJFi0B&8iQrp6yX7-t_iM>LB eM._ +!izBh:XH,gyia+M(SàuiOvW^)թgri1u+rÎJiα6X_CePiu( Q[i&MZEojVox$t jRxߕ}lx#j 0OHh,\4hyO6jFǂ_nΊF7j!P}[}!ejj"YN?.$ qSj19#Fa0j=Axoĥv""(~ jNCT-L~h|;6W?j_BYg~lL9pjabBƠy]t44jcZCBD IuHіjm<,0pfOPjp4L:&I%z/Ljn|qCLe1/Ij',YjҺsn oE? 
j(D>N'<N0w4j`]0;,+,{j!l[@Uɛ9|9!qj׾pbt?>j P, 8j .Z#;bbjcn{Xi`2Hjу' [7&;ތDpJj>]V:7@j޷'W!l}t̪E.Zj]uBbL6I1$Mx4j8 2{!!i:@jEdxlNNyqZhk?u84maRj[DkfQj^+WЂzm٧kj<4wk&-VYm#.Lzk.+WEb<uk<s:,~2,\l]kCW5]oa@^UkQrQ.R珀UXkn3yDJN3%T||8*'kn5I3œM7hdko:nN{VLku&N9*^.:)mRky ٦"R~|:k}=XuUki+Gw @T-F{k\h<|!̢&7k &̷UX_եT03Gk>Y)xE&kÚ~28)34yl;k''@ .fCk!8}2h5k4&$ \y<<kçȎ~}kğ)/u!#gakims?&)KA;kxcg}X^9ݭGXku[SI Wk_$.?$xYkf\jLBl i &s&l.m'WB-ldlھH`לahl&搭mn}0I{Qdl0l|~afRdglP_/߫B0QtVlSt`P ]XMl^'-}?>1l^!-Чኮ2f~y 0l_ .wt)UQlaK3@Qۦ!lif:-Lm0v:8lluhUłF柞lTS}ajL<9 l)`ُP#LD&&lFKw,V,P!2nl8)ش{emled!D~!rT4~l4LΟ}F Ѣ<l֣%!(;wJTHnM6l̦#FxnS:T2ltzY @lݎXԑYG.)#{aDl 3qF76ЖKlVjpվ`y^mJaǔNQ^JB{(m&GV9aX`GIm'C O4t՞PXm,sdE JOm4"^huNmCЙaf@ VFmNAڂ1tfE} mV2keô װXmtUg@ 6zm x{·9<p`mTgcB/KAemŔ'&xy: Lk(m˯w7 2Ju8mJ19$3@{#>m̸YPkv0 Kmш]fYdBk?Mm?9/xJn%>;Bm%dM}/ (m`HT,ygmjrR\/Hn^!@C_jA=n moLϗGn!#=#QS[n$›nj'*|(An%׫22 Ou&nn.du~GM=n8mZ>J]j@nD!`ЪnNtzO75I:-<nO4?Y('a-X|wnR(5_j&v nS\Н4%sxniru򭋳rUpnr ~{|*P~kalnvV8B?Siöۀߢn|k<"qA2~nZ]U,s7Eaq#;npUǎ2v+9!nwX cQ:Anfo g?vnP5?ni|JW˥jѮn)YXD"z#,-no1AVy/25G"nޱ*ʺ`Zicnh dY<_`nv[̟Ƃ}o*f"j!A]q<o/xl`+o{/ ס)(o-rD)`}Uo$V3.5VSO'eSo+rCNy.o-Q̂&]qTo<WO="?oC7 D[Lc oU$nGtxeu=OoeΌ*κ/zٱosi23dnԠA ouXS)O]DJoz@4=Ls:\o*|L7ħLoi>y;Z4C1o>K^C_' WoZ޻dA$٧U%o%.a~ ToV&pQ |Ro@T6#$UO.NoЦGIX ҫpo!En:?Z!oC&պdkmloT:^]7s04Jǐo4VxsA 꼄oU\ etleZoӟkň &cȁoդztΑ+a8:Tlio@&v}U.koޏzLkCzb$oIO8N0']A.o rFG6 ̒(OpY7ZCY^m5pGfkP=JWxZp eNbj/GpzX$pL@/BR61p"JxX"dSp(k4;G5/pE:)Bz5^./pRZw@$4[etӑפp]NoZد,$WIp^IMj\KMpeq`{"ֽP9 Uped0%u*f{pm0'5B E]^ }po-46vVpu&(6^4j ׮p~^IƟ]8H`cpIU>Qfhtp\kY %p>o ul@+wp[Ia #oVVp$N fKM`6p5;?18JUI4puR@`6"NpŴgAo[;ɽ'pZ1GƐ帠,lk'9pf:P^^pR~I}0IٶU̦p[Yӫ9B3pq }*5J)XnpzВSن 3nGp㪠$lauAD4i @pc=L^z c^?p%(9%|a0E5p<Lt*%#T\HpN{}Zq/)Yl  qJd6iRvqBo$Ѹu /0 Uq,EQM[ĝ-bq6e+_+p&e|q7@\?qRnxΨqUT&-_qcBNf i6?)kqdJj tqbW+3qe0_LyO qw8_+YWJ$qxKtHn&Vdqzsw9k v%q{ч4Ry&q|ut$XNmyq}&u|VI#fqNI w2kqaf?}'ėq8_ ꛠ\q!llHʁDqi1qBGDpPJqph!r ]qLyBIӥ]qG*zڌ?lAq^lD, : zqεE D^LaqFc "që]q\+ҟc='q]w=G_ F֗qAR./Cuq?deQmk$ q&Qt]$tX qsR]*{4ZF)q<af<~ }O4 rX~ǰvWVw[or i, 2^OkchZfr$B["0c3 pr1rߘ+w/q舴 r<Mʤl bkb\.r?9B:m]e"rA!_TE5⢏$krM()~\rUR!lxlCl9Wen<Xr_LJӒ FKrdMX Al)}yrfP62[Mrg] an [arlb/xײ=2ar*0J˽r*B9@?m!' r}ZYr`;Om;yrb9-m}aYr lpa/r]U ˰Vf#r ?Cm 冕p5rҽ?RW7-VrЌ :sUrזjVP/%Lr=(7B [㟍r*A|r?d!ַsGcF[s*kM;Mdjs/LHyC“'s=3mr3sBMA*sHAeDJq[fsIv6]vT,^PngsOBPNR\UsQOSfBcyC`sVs锿$i)[%\ s^m} zx 9x`h"_skDJ)u?s^ysk0n0&tY>(W_tsnjmEq'CsuL*-J-Ps{Þ0&TJƒI8(e>v8sPVov1N b sxj Efsu$]bfU'X ysmzQ0@? sgrF$ )s]h.vn|scTI D6!s:g c݋u%{ps Vpp`0smK&﮴5-qRswr{[pnzEMs1{|Km{Χq]htH!=[emd0tFԓcBt>åtivю|YbFt?C&w$tΨ)7Ģ6+t֪OZə=oBGUt$֎p&-t;@) qmL .tBα/1,>$tFdX^äf/|J֯BItHЙ2wzInsFtd|uydtf=ĵ|0tĺCJ&`4Utqi85-{-kStrYNP.YttSUDkfh 4%UtҪ5T#89t4bʺ,ٺm#t V1> ZDEtv%fӴ,g^qIXtOgI]XtGifuZ_t'-+99 t؝?:N-&mǐtrhUG`tt]bPv= xdt7\Y-wT,tIٔ 13 tlrlKP ixz@tndž1*^3ltJ#؆a7]{tZ<Jk7txUR(K˺nftnꪃuu\69eZ؀ǻux_KhreuUJ)!{=#߱uz~Gפ̪ylTtu(Uf-/u+!%U{B@osf u.yovP KZu?y}fydc] ~u~tɪaڤxWunشGCwu.eAz-Y5uŏ+ e2J=?ucSH15xnu {,\njKuh#t<Ȱ$>p ~u,*Ҡ!i y2ut zȚҪJET`up0Ss G.euR@zp`umCoSgsM9v |N o&Cov C3HN&uv x64\Cj8vUC? J  <s^1v#6q~1>Iv2Fɒ0ؖf\v5_JS3WlvCٖb_Lă5^(qaviKZP]UGm5!/dvp@(~8F,&v~dc%~XvVi`N2v`Qmvi0*KE;tħvʴ:ޕ! 
h!.v D< wcjvkUÏB vuH̀븡oQvïK 2}15Lg\vأ1MV͘v0 y$Ob-wɟxI8vlw 7.6(Ŏwa;jjMlwwY$WJ4sw>Q$-E$w1l`ם GjL<& 9 wNS>V\{CN\wTĎ&~TG?9|wXbtDn3?wU3=ުU%[wyڂ_r0@wmw`q3}̭w;CW)]wdep  Kg5wkM&\GFգK 3Kvwj呌1X?nOwdQOy\wٷ8tF$DX( wݤ8ݤo8t#}6w޶P^|e9g/wF?Hu8!RD]w|&!Ib k@צwo3qRK4e'Xww,(_V&RWSMw36f hN`O^DwP%K(SDuqwȟ-:' E x akzSA{5p\x(UvJL[|G}\0x1Œc=ljaP',Ư]x?BxP;ܫx@Pg^g'Gx@+\p]nmxAH@1WR.E>ʦxCK f6I̐ԢPYxYk32c$BxY8)!yߵ‹Lx[ Վ^x {7:x_v{<TeUxeB~^qwE|m{ zxxiL!o=, (z/gg$xxwnx3V:s7bcxy@+U.sktNx|k EZl$r/Kx|a%<ZT:?2HP{kQxfUOx['.E xd~qp*xDeVh,2x%} iq-R ѯx̵rsex#`dxC=𳞍=FuxNY7d[mxs,"P:Nxb-7Ǧ/+ xܐ$%f:xHBr,xΰjY>*elĊӶx5\((MJ{$āxުk@AGfy-?}.:gq\;My)<h![z@by 垞\b,PxsyK㑎1-9^Dzy$wPg6S&y+,.:F_¶'y?Ej_2;TXyDZ4n<9ԑDyG,b*̀@yQi/Ir͜"F|yiQ^r1 <8nyu&xe*^<!TFvywZXq]Cxyl^v>uaayώю[jtzμyg@ ><mMyT{ .r/`yLlcJtxAy%dxyU$U*GytP1 |>y1+tey b&\Tw{5y,=QR iΞ)1yr -R/+4yrCjY wJ~z^g|LJ!z{)}+hd_ںzlſiK Xez> V-}z0M{۹$l<M9zM&RdrV(-Vz!! 5KE!(]6z%bɦޠy1_#z-n=fRz7ŏ7iU 'y$y>zM-_ND'3ğ2xz_fr- 'gzj}0 R70C6znI䊉ةF?EF%f`zu"j>PUͣKƊuWzyR6$X+tzzga5/>^3z|5t;:84?gz~&ڑ84 @(zͥ&eU*v٬zچ=Zf^%zspy g]`Azd\ԝBM6WzE0/kn+-Pۼe_zS m=Y@i|zs ̅euz϶+^TqzҚq7B5+_cz_8dzw?yzumw/&Hn{ tN -UIr{][n34Ws'{콜QF:epD:{, c{8{pɦ%{BRTlկq{H+`wX%/FН{KcS``]vK#`{QX >hRNtc{UE~B `ِ=l{Uأ2b,W\C/Ⱦ={nm2qYoX{fW JHjK|)ձJ^{!No&=ؽ8#${qto@ܣyA{e--&`0 Ŕ{<˸-0;5Y{/aMlQwM5E׶{ &bR{%}<*t>{o CmQO@ 8e%{ Rϗ 4V`A {|3Kfo x]{ܮSX!Sj{VL1|uy{jS"?a)S۔{!)`T[ ZKXwT{| 6q0 0{' {$Z<MFޅ{3Yӿ0Q `{0h J%w?r|<ţ,tb̓|"G\J׋sd%C,|*H*й?ʽ.jf|-;3GJ{|2"놿r}R-H<y/|8{e)7笇$:N|O xK;V։|Or 785mQ1r2?F|P~\v@ ||`i񝕇 ~ M|r[=p)ap|}̉s\hmXoZ|J5N/`Jˁ|i]@t7Y |p q0xXи |EM&=*D-5 N|wMȏA5‡|=9{,쩨|ߢ |iP%6i|6_+/ƟyaM*{| CϦf͈|扎3\<Ӗz |=K9D=}PU}LjETbcJ+!]} D)"\n8A,} !#f>}4!U>܎}nie"Sj}4DQ$ޓi Ru}}7d7ПnFbbr}< ]e6nlMGg4}?v]Մ78^S}@ʇ:MUv~]}YZoJX[^<}}[9bMw}k=eIP 2}nhB5f`ѠP@7}}"yn~<-}|d8ja }:ͬ5(}C5ZyNAU,}񔥃 7d@טG}Ṁc>z19}`3 1^)Hv}ۼψ/ҎL}BϪl4D!}6<Z=9‹J$}qe&Lš.qŜ;N}'Q8 }j(r&QI[}2s`cf.6s r}Py3%# 9XIo}\ak|PqcRI~ϓ( δ !^~*2 I:_~D'+ \#o")~M+G@,QRN~V62jS{ϸP˯6~i+E<_&,a.~iCNvőrj zXtV~y" Ǘ:~9>4Z,~W#N*u ~X~3 ~g~4ϪJC5c~S|@zWL~6M(/dC?6~Ձa sVԵyߨ~@t^ b;]~ϬuQ'i{< UY[W~zqp; j!~/ZSh+U/\#~<IS;LZ~93) o7SA@/e.- f?c.O)]r¾WB)u aufҒu&0~Ác]ӕ_ B2+'\E )Ҵi+k6[bg*H) dVN\ 8) 팞0A=wZe*R;/g&K<Y;m<ɣ3&7D?'hi@{gR52ȗR/XfBLP9VP0Qc2[Zk](-Z,uHhܷ0QGb}(ɷC9 `wOekJy]T gwqzZ*k"qF_]p3N9\opVtkO08Ev dkp+eI4wu\!tslYSv-݌^*=E&"|L..CdNl!!+4gqߒ=w?ىH>!&pMaa&2rI.y%Q*"5> Xh:U$O2lfur>uLi-N:َ2_+E:qޫl?ٯd1B_ښqm܀KAYRmT%ZAf䭍]p \V$Kv(KF(^]7@r kA9bGMNZ^׀k`Zz`<8oyրpBbJi&6"ڀ 1\)%(6zyD]<ۗ\F,v0ɬebI7/}M~*`#d9ToKAb!JǧMy9y\ՀU(ҩ<o?r S8^B{2L6B6ƏXa j:GԀb.WH$5!zya^TʧPnc*HfRK/XC( |*.~I.l4k7pCդݯs,D cOrJ\04oLRvQ[Հ)%Lbࠕ* ,Gr/df-v ,쬸ars,{` I!n`[3uW;ݔжZ_,pjuj}$ZH7PI9q}Zf2| H ۫"KF4O(B>WÌ5*M@:fiPJ1aϵQ_Li:i]"Ȉ7"S:N"84p*,[Xi]2~"RSmP%)职1/pt](]*`|]y8DTN8[­p&\lUyz닁g}Zìxj|lai NBr2_5jFRMypAN3K<_lIUv/qOYwVG9+O.7jN<w5oz G[kځb};8¸O"Уo1WZQ:v*;dJ卜hj MWEv-JxY,xq2Tum+@jk ­ ًO͢|\lO&x 5JqGx0O^c1uZC`#5PndZ;9 [ %/`$kYKykx0-C4V|Wn;b͌Ի~A f?dG#GxW3=SH/+˺~'ЧWF+w=*tDzI3zbk$u4}E+:u6EŇ>-$%L"E`p{䂗:~EDdǹ9;U!| ujقn,bs ]~ "F灇Dx \-BIvE-ǒ %ĕarKVTxEu茚CԏkσQx t@ZBTT&Y"_o+vpRRJǾXU.HhmЂ%?3k$*X{,r'xJ/a E0t<ZNkRCTl6ʇZD3YꮿG~%vK pCFl{C {l:!aa/]$oZNg'Ebp޻6σ(B!SSC螽<9\,`}/ҵH3c/Ӽ 3P' (DH` QnH$Rp`xO(c&GPkb$lU&v(Rqy#v2$_Ht"ܝv3SF?D+}ڰŁ >qd~t(4s8Bt&\߇`ęhϷpmBB|I|p $u*0kjaw9~"˃c|Cu:9EcC6>`eBK YbMwǟn1C۰eOI/^ w^+ZG@={ 6 9 hc|53)Hp̃zN#->Dj/ʃfO D@玄5B%:R^Z aFJ?~9R0!펴 Dy8p5K߄,p>TFp.TuC[ 5Bt aV;: āMәo=RƄ@Sq cT%UYYIH(1ce]D^:.IG9{%<P9>:I^dV{ Ge8'q\2`19rFiKa]h|K#[a ɯ+1~ K'j]k7h焓Yg4)>l}v0vJ!zۄҖ +I } p,"CBCF0qk|z|` ۣоc}~F$6D9%࿄4vV\ rLngO{Cﰖ㶄5_!*Xa\Yo>3`لf/lY)=BIمkO)ө#9y:n;63mV]GMW;L! 
GCH/h҅'[ ,W kV3dQ2e,WJ~ "5HlZp4r=28#P- H _O@0|Bg=>u?2b"Qʹ1cƽd]ÙB:˹ueY-^ b]q9X|N'<v{⅁F *Fߣ7D\y-v5˨A!u^y5K9m IչkɅl|y ng#n/߅W?p^E.K0LU:sGD g'j1"XJKa xߤ3`Ͷt/)M}+.ޣ| Ib+OgԮ+)3{SjMņ3<y; L9 kC.Q`,~BplSCފ*rwAt!Ǚ| *]Jن>.BEܥP4}5n*{-BMw܆*:K֮$Ee\[O7jN3t $KgxEfm4 k$g0z"8OcN;nxWd"XfFs.&ZYۆ"0@E7`뷍{b$5EխHT!2(>~u{){5چe-Fyg 豰g`u!Y6qX"gC!~U%\)yye<㺚^kx<v9نyW2K݅鉴%bEmٓ8ShMV LȆ"ocZ˝+OԎ3dg|G ƙ3N [^^Eûކ&5Qt ܉ dc'(~X ўp# SBʥ!N҆TJCTC+UN9zN k퍾)v4=W'ǥ"|e #dtӔyyճ8;<Ɨw"J=VxF3<XݮGIGm%e4Xb󦝶|&w7Y}"ˣ~R;65[蒕FoSm AbEl*jLڇLܚ霾&D`50[TbVY)}7kS_dgp|`CX5`Ssd2Q~6xBT!ǯJ跈3/j0@w ^jj|b/7U$ T3l[SkT0$'_'d*ILؓ]hf^ X>C $ wDn<0],=臨|j` )sX*=2fkڤaXL- 8췘BӇT3.YgEUs?'_pyo}^>+ۇTΧv֎d{3,9V1Y#؇MSܾ|8-ڇ_$w҈(7E7xz|5<>G7crYHa*c'padK]ZSIax0Y P/λ qb񅡁̳}ۥep6fF|+_S94-X6쟼mEfƜj[Q[:u#>@(BÅn %O08UghQ \oW󍋒uJU6R=-alQY5`*yZو泂?<Dx= *ǚX<uPi</<U\KꍹOlPz= Zdr|< n=dYxN/3G*ݤ Vj ZW썉)! O=m}1$gBϳ@JkيN{,lZE/ s<+Bà,pdW %YPڍUcx!QT<݀W4L/Xn~5K^J;})} pb)+(-E*}=xmK:%' 8@`<$IIA? 0EQe]"UnP<d]0jEkuܲ13#&Y#eUT(t@y؃qtRbHZ8ObwV2N2?sblrq4c,JR!6`9X]ʥĺl 10Iz}k=ՀyQrS6 BqNlj#r&$CȉoX[2dE[Iy}ʼs";ߜݾĉ +ѧƺP8X:X+v"egbAfυP$srCvcq3[%NbwN|ފo=厣iJT"v(`WwM K`# j7"%7'hF-hl2bh)<o3SȊ8FI"-?ۊD xiɂm7-)ֆXps"f%Jdkp (Lp Z\ tsUy5tn>_ t۾|T?B\8ahmt,vmjMxwxWxDTsL{̖*n],QeL8UƤߝs~0Ծ!Q늌Cn:bOo|9Ј:WIUo^g;_tټxofP/7*YUֿK:Jj¸%vAb 9RPX>-EoS9$hۢ&+fzA則V_J}|R$(@nq teR '>mJW*% "+@`\-sCnYmOajɴ]-I .>]-$u$wk B aˈzۋ'&9I/-2Y|-'\B 07t73]Uff"x|GQǵ;gZPuVx${L)FN< x$jb@T.7XHAbU2&i8#T [ %)LbSƋT'S#€<`dt94t3) 8 sOf(TŋzUK*췞>L5@QYDb{ZW ơ8@0D{I- Ab"?h2^h~q^ewVYوKWiOXl<+d9O9!:!cJLS@ދ3Mb4N8f愋Ñ)%~ލ{Xlp '3)GT <zILE,GMd~mth"Q 2ɥ&;~m!ً;.0AQzš(%O7g ߘIݎeq /) &(tnȴ~=~6!4ju-TZt+,6|,3pg[; b)ޣwZIA!/6ü&{cI43L·ml8URJ[;wdk!9*_P5PG#\Snmg"3x>;Sl7ze&^aT` x3v[p) 0ԀW3Unތ;&><E,rE>jf^nW匘J#UۈANB>*^0N[J7k%z&5`])%)G엍SIIw"eZB qM~+g `TцR94, Fπè@TT<w t0tÆ7#kJK{Cd;VH}q\g)fe5(n92c fA鷌Vhqɽxf;8ۍYѼ?j9s! ؽѕ.jP hG!ū987vsU=|$lu͍b)a,&wiُO~+Ʌj^{h8my5M|6:jJ9>W#{'-`qB! 6lG`l:M38 0Wc%, d"îX9 BdN*Z 󣹸l~ƍwT;Kn5 rV p蚋 X}G`E󊍒o{$/yy0Ij$M cՍ F¡.dXʍdiOZ ] #{zd~6eF5F[ڒ[Gx0e>K/TF^(?]gƏ}-6eX^륍&9DGߞ6LEFėH<YDXA._* ?q>URD# cq;l=A<$idr,@ܻG=ϰ=oCknAR؞8?]XyҘ5Ɏ^fLʵ=~SE&_NrýlQ޴Z}E*(g/\Y%lJ;uCwL ȘsÖ(R"sp/P%8đFZϲo&A2UVq'q 46:tf\I YKZ՞tOڑ22*G"Վ=Eg⢓3vjﰚj Pzl!b5Mb9Y?3!uvioRU J^Y(F@q\fz&k4'eՐ&{Shnja:1Џ9gMk8zzߏe:!K u IX%CC"!E,+*qȝbH,I`XX|FNAA6নqJ3mc<IhʷV_s_[6M4o "`XVa(pm_8 y"`Fvt_Ή4i$ryUl$۽dNI擪E&#j_1$p_jJ${I.;'RMWUqi|3 [7(ϳDsb0鿰)I{0pOuVprfP#^5phsMUF.f ֬͏zjSYCTݏ;L֍˔<oo' 3QiL*+i!揪zZ^J}eXF"Ox9K`qֿJwŃEțNqh:+_*kHdBw.XjWNq40_U! 9Կ 6. 
8]x<> ݇"\c4;CƆi||40moԚ#-RܘK-Hΐ %љy{^(% f ELj礮6Rpv RJ>^vm}dJ*k0 k t#L!tH#A5h'X%4rX!`lА/vtaQ?$O8ZnZ{<qGȉ^;"0:]5H @wO'Rh%ǐRAi"#w*\/|WQ|5R א]D@p.gNIbTn]ܐi3ovQs>zاĢk7WTsZ-{qq2WiNcCEy`#$do9A[#-`|@v9^GD:Hש< mkpWe㐍(&U 8oU3J,2- PꐏޗыCQ#ۜ&krOx䐩yJBRYZW!qFJ kmbgت8\=*7KU# Ϳػ |xkQƇveVxW&}{c/508z ;lEʆc2en;މKԩ6^s&5ɴE99MbVz]?uS8];WM?뾔|#TZOI Pf&_?*.\i>hEx={+`Qa!i.F[tođ,N3e㾼iF 0a~dږbKdom1 )iHXp31Ќo48EnqNjwD: d{wf@mn,=%  Fx<V)uⲯP%+ٻ9Z ^4B4ΰq˟$`8X1V͜,yhsYGc1ΛV!R4c$RW3~DfC\eAѕP(2hz7{0B'2]S ir<[tl(H9tbTF^^[߹7&B)|1X5@PD0꤈T jرIc :̦=b=S#X00Lɨ+_g XjLצ*-%EaK`˖+u9IƊe*ݑ+4s/O&~zF8 K`(Gt/MiV7Vks6\Fޒ@7XN$s EKg^1 ]ǒвUCaEjy4<:h&K̨4ˆ!4mHϒ@aa_˔ZLU\ﳆ0&S!0:יe[ڥEZۦT+8YDD5HAWAx|@$-ΒY)imZ:V?Nzr&jD4Ȕ3V*)RJq}:"Mi()lwjEN.ܽ ȎwϾ/ KtNRw!CI(;j+{'/XB'-pO8 \Xg3σôն>7?i9T꒴Dj/=n`=~NX F#xL"{$bϔcukK7<L5@ahZ]8-<%5LȨ1rJ'^gp"}o Gp L6|10ZztGę,͐@6`WK=p>C;w2HG?7 bsٵFAOaҍh$BGql#H~y&;ԳW/?wύ=;,bXxNv.@nQs:`(O$a)]}5g{q5dvHjfin"I`cF' QgAHS]e8ٓ` \,mhZcEw=QYӌR,DO"ۨpkS4|R D;\W#x#V !{,9ht/Q"}g7JAXǁtC6Oʇ+/CƵHēbn,ݯC0ZPKTP LBѓG Q[_P 5G/[N>̓X_(ud]lHք-\߾:F6C")x~\0SĻ˦K~)Hsle#yHJ7b^70S,|$l y0t>@G\{!i!=Z9eJ{y\nn;ov 8%aOqEvsC =`"$&ȸ7> ^ Lzv9{v&+2D8e'imjwxKF "j(ϖf$9V˝rP𨿡#,-HƝH4L \F2*R|y h^#Q33VTp$TD>u͈Aek' rT NE .?@Xm_1#rAvπpd?`ݶ^ӰWhf]GzzXqlGEgM7q4aRPaٔj;QxUz[$2*s%H),Vk}{=1\焺`sk/5[=M %e+Z^$Idc甊^>֬9p0KSIu甘@Q{c$Y -Yli#X)`| 7ZZL-De 5dTvb(KazcK4? Df. =0x+67uyg3 5]0ԩ=F;1 %*!v%{\42A>eqOeŔf9@n@S֕5lA-Q{j TEl#}R{'n5ϕeiQ*lLE]$fi0堨}P.HX%kTöFe6)L QrSu$5")1 CnNfO>~ѕC/^ 6pOT=rKtj}޼#Ց'^P<x<?Bu^íd %)7t_Ɋo{$pɤ \G3Hynrse=y\(fbcD(fiuh]ceBJ/5̟g\ gp("#nNYpL]OimP 7񪷶Z$aY _28L7? rjazD֑Bʕ)h3[0e^ͨݵ4~ըhbz:Fq!;*qzVy~X>?ѕߍvXHZ(DCw#uJ<`dT3ПePm$6;iTc܁O$o;/(<PcZ3K維ifL}OָxM_CZx+!-̠~`9cZRefdBb!YsdX%r`␳ '*~'N 2%H6F8?v%)q|aE=MTC'*i9"ƶ<[l2W1wtI Oi_Mp6aыRn A%jWيm=;?v*"ƖXB;N_k~؅6|N[U=E)ai+`-[cT|kCp;#d)UX cT"AGx%ed-,[V<oe<Y9 ^`w hDnK՘ Ϯ)㖂Q2f3 rj Xo";WƆ)P8v2*H+΃m;{0SaqGÏ31:Hz؄ԷOtG̶<Ls4SLL(>@|kc<T7=SǚږO'c5fxf[O1kbϿ9Ffn!$t-8M嶏4ɗ::X([Je'זfB(ur>̭޺U.-٘<Ru0KC tꐶ"]lL(nКK7. .0bOPW%_aaO 7AzWNŸ_98,貍'K}gWR $WpB?${I8T,9eG $^{'4ZƗ 5ҙh 4bʗsIO&-z,3i`|搆zNT׌^|8Eח[ĕBJ,R. y;ZgX1qTqa;_c3PQ3S#N9w }C 58GZ_+u[Eb+E~ r]qP֗SpCT'?2H>Wj 9onj_8Rp(fs]8a!%3P2~1j*UzO1CLߌy^ki[d 7x 7{]<7Xm)U%K G? fix}<Zs$Z᪵Aٗћ#UHMMbY" fk(蟰4] Mm}dȕz Aahak ,_wLGa"eC[>(<t<'/D|_ &^yqIJcZÚA㦰Qv+{&/D:VW6ic߼.A-4#;ЈR¤Knwu}=%`Fĝ"Eؘ|φŬ d-% zZ)}6dlC6oCHc4cS"x nkIژvvAF!UZ?ˤg(%xXy6IJuwI'^? ∹1E.DvCqyr~؟LF^GkB?;՘P aMX٥'#˘RCׄ=]%f=XTA8JURo\Nr?:e"ݘ *(VD*Ƙ!(𳞤_8V*jS+Z%9oAmUm5Řjj똦6<ҊØ?Ka6@Inh7mP .˒H/!+^*)=LQ ?u OB8hB%-%u,8ztigǃN==| fb|oZQyC3ӭ.GX3Gj;^Iq~$.n3:@3 @BR_fg; ߙ N^ RtS>E:͙(.}f` ^w9YVU+-dIyұ<\ꀕP3S{O1}a='_qs@0ߩ<= IgFl_2gB%2_UvejcکVEy}a83N͟3Tc=F~CwHЎ;6jAy0~gq&P$< euc)V8w|s2UvQKYp-pXZi-nѳMEFc iRvNF!J4Z3lNGe%.u*6O a#>Vwz'"bT@YJ'ǨCtxm5IQ NgbRn]@ J>Ή w2S%SG|]FBc[]92EљltNt-FA JO혫o=T/ #\so(riq&lkk*)K̚^L?̓ -P~yS (6|+nPz*:9Bfdi$Y܊d 4"FR W[maNNDF]zȇ9cZZ 7wQه"П"2 Ooi<#s֙lBؗ)")c# i6<r+th~w/zК9Fx$!et81a.QKT9dAk*@\66,W猣W孚nYM2+1;psvmo^ bGr9ffh䉅VКLjps Ϥ5:v @>aTfQ&e#ڧHʚ4̏F&!8l;Rgyɚ4*N/q-ˏk*쓚5wlӟ L>ڧQƚ ^JѰX6UϜYr씁:PLԷxBϔy^= S^}Ayd`՚4 XtrڕOxh齹:_Kn^'L:ygߛm̓5* k;!zj.Zv=1;i^80hzelNqf1ZI]y~vcC^7"1QOTSQH(>JcVX8U1R{u{Vg?Ź+ZjYPW_`"]E8!]UckͼYܾe2T$2tyݐ{_>iA>0L-v~&b&j[גj\\SI^ni!OxvL#WB'ppeL3b!1ϻEۡ{McL75#oG,̶KH6 F!zx\&/y^aa`hX7PxFܪч>ƜQ24*#.OAtɜ[i8 z'`WĆlw,ֲ-̕t؜SU[!Pb>u Gj`'dSf->:5?3ܾ:2X:O $v'?NQv^`)ɜ@#*DF.orr e 5/6Gg/w{-ph4A}#lLV%j$KP %-q՜kB|æRMԺy q=pz\q<ԑ0K{$%)SE4Kò'pabXyseya^/3ڋ"N:iPˏT]0r5ɼdfM=/zȑUZ̺] Sm1Z~(sb*s f6?-w5{lZmO-xR{.Gr{s+m0M4I+NB9-4/,e%6v8@"5OZf ivk&6jjR;iC"(Ӳ(hpKiC=&t9JoR\ڝ|<8$mq:!͝%zԧ,e4ds1#,_S0/b59dMN:c<TmV𳜍.$Q}NeLG:Rx&\RRH 1nΙ'R_5Gq~:<"F_^+r%3ϰ*. 
LTl'I\W9 Z`ݸx3[j1mNˬ%'/JI#@ Ջ}\lBrޗ X.Y}_Qxg= 8ɵlNčzCй йj>_</]_cia M$zǏ7 ?̞\[nг  %Gz.̷ApKȱ`D_T5a0\qKfU(_Ծ`ŞMbL3!xLMWQ)jX3=sr~V$`{9Gʑ4 yMza+UPU::jgcWMȄS a *~f8!WZkjٳHmg⭇(NF'F;Fs:&ZDY`!A2OvW 䢉ԃp;@p=aF2Z-Оjtϋg>NJКUDvfNQM)_$~8!hcGW|ކgsEHy@O@x|2L;'{Y3IGH*Ӎѱl.)gcWu}o^(R;7&Mp۞/xh_NzNߤ Uv9Pߞ4\aH6d΄6M E-oVJ0` N.HsH g!/g*  Cw3؟aU~7% *<ok2~GR:xNTL`u7crJ¯>؟!k"~cá%l<*ld9 ND-C9ư -KMhUM3>,1a iG80|;.[8/?ZC|Pe]/+2Ld&>IrnuU٬ͨXӧ6R(#MUݛN<f+M-OPm'dXlX6y1ȃ8h֞L(9fɟz_|"5}62Wvn~e+Mё<p6W\.DOQ~d1 ,.#;4"5y +ˠD..mC"Gc|<0uPީHIW!2nހQGZx[l ʙOzW:% gQ>nWNI&{R}5q):FTH܍o1Smre53[FL' aCQ' >Z\4EmL> l~* j|Y$| =´φ4sP+Bq2]]ml4l ,pا ] :8 "ZJVLcɢq2?@Z^ŮV{\yJrt[KnVF :Z__A9;:G.fImD2,C}&g =#@2Wwsk3۫: got;"͖|Uo=,&|57)?7n@,b&UjK3sXOveN-'@S6oo3_0ne}Qלr 2Glݐ !KoRۻ;˪hŸO9r =⁇Idr!@<L1Bw(h7Rס:T)Hs9?h# :)\B/BxⷑMBUuDHOʄr[gԋz0Pd̟&2~9 d6hE&X4r@E& *@TgD%&hQĆ<pقqNMd{-,v!Sߝ"e~1ZP/=&ס+FYe"\da,+$0FjG]Tʷ9ULh"W<"!8y *5HnOS:lSɃݒءP_ºE,Ղ-o*Î>3PU[ t `+9zٌsV5g׼^6f6 ݽC6tûRj,0Cqt>c6-|JH 2\s0!E~G p-Bޝ|=#|Dh!ޯl;3A<t<<[\@ti;s|YiBܼGX&N\UOtӢf":D]S;tVsaG"(Kǚu͢*&ǟ> ?TwV/›zM T8čXwtP%dD ɵ;wmc]'%w ΢F&veХTHnc]wno>pLp/i75b1Q(WlyvG"FʒZb LھZa^`2O)W\`eܝuL1.ԝgpX+K h^[[8;HqDZo򮭩_r,^`JQ%ҫE rVgV`v5/, *wT`֊W!qtFcFsKlOd#k/ܺn5:N{IsʫeLpb],)0,1i_Ӭ|fì, \2pX$H-*CYH{Ya/ @zޗ#?L lH<p̸] M[9u׈qTvĚE5n`pf,'ƣ!Dwu㹸\֣.$4il:ң/Yr|5{~Z^\S0b7t.q7i8Tâ'8e_w4 94cgmjM5@P, < {}o \p^+jN툓 Tߘ a1YF5c0V</r ebN %`3}bu{_{$%pDh11ԣwE?a&PPgXfZ Pģ?ih4Jky9 TA_ݜ^Y5Y$}^|T9wIɋaF: ;<d⠁_'AFƠOX0C|$X ף쿋IZ< dٕUWRryDc:FQWB0c/7ԃ]C|=u"rK@2ժ{'B+u]L;B*6:J[#QIHWl#){J+LkRCe&?8a ( ,DFsjߛq`"69lV Ag#UVHEO切ds,BexZɵB0^6utK~ FeSqj +={Ù E= ,p dQQggѤNu$Ǻ/XX򭤧2DP,S:;& J@ISNn#}&j^ĬKf[`"(r돤؋0 DX/0~9 4xGL'~s61ϥd}Fq_M GzQ b,^k!4u回%CXuP #6;vO4Q$ӇX0H(6IHooz"ʐ*݈7(RZS.5PyN>0oۡͤ>:/M4ǓRn!E٣<$S9nhYC%:{m fC"#.N=e^EQ7)` *{$X=f5`:2'4r2%^^;g& ixCSA.e{""xy/in7ͥ-z@!jFM|g{LkJ A߂8VbP1٥X8$FpkT"V5zqG&B/1މl$wUFO(mhڹJ ѭbq ?) rNO[*aE^ty)bsR@2 89~ΤCKa۟3z@KN*Cz I켿[%&܉EesI-wwvM~L]A [D,ײGH~4s1]wUH8rs*xK<zQ ].Jϵ+BQMW5*z{:[jx[Uݦ2Eu%0!<9֋2Hg> -y<"}qr9x.Dvh2fI/vJ$sb VKt-fmɀW%d٦Q"+!nIweYb**䶦Z8Sd䴔B%mG/%\¦nԗM+?j9ƭsu#(զ=#R7ȵtM6FF!e򮦔#3v\0K+pdЦ9;gZ9*Ł`y56P"xNӲ+sOS Lc`< p4rqP"\έۄO^渳Gg0zdF ~,.Ib#]¨E`%(r`kq:բő䓨17tP.ɦa}kUun@EiK留ػ8*@</J@ Ԓڍm'p<V̭KYczAOi;m?2PyGGOXyG_뮯 j@.+2Rc`rKHR@NvXbd'|(60B[ry%30޸5;7t3S/y=¨=6vy:Ķ:9Ni$3iɋf XZ%'T CqZ\X OJ/"qۧd kM>FR!uvٗԡ 779~uQ#iJB,qK+|B𓤉dWޡԧ U>V+Y\Ȟ0gaTR^3La=秈"3m:cGwp%u`$Hlµ-9/Q] 龙J7H?+?.+'U;B"(<]Bj |faK9ؒk}{ a{rdz,3>6' 4rD`Ka}J#SBy)>ߟ֠RV;" ; ~0ںi4|3yW.hR2|gs; }T/@WӃd О:NL8 Q +;,VelCŠBzmZ+H &YI I_~*(=jh>U߈9jsY}e6!G_7!xU>({+32%/:N4+^ ҕA._ v˓5O4m;qʅ'7+t9#OknG_jy3;U ZFߏ,ﺀo8I.8ޏoGVb)W#W3ͨO#]s40ÈXcm$4R %ZPOY6~F$jqemMRo ΂W+ (A QL.Mh8,X_'n ¸l}d<"DXaM:p(O E8i j-Xy*k xH0^|OtV W6zw\r:>qe)r'`qttkr 6WA3J۴!̨Гc7v ȢѺ]Rmܤ3Y'"f, ɾ(NXXVI%Y@3`Ď/i9ʮ rvpHZz9+ '&JYR ][ p.@e~ʝ5Bf 5]cn^ Iߩ(iBZ(q/y4.;9U-%zD]M6X /c#]Cc/%X+(J8i/̖MRu @z\{yީq[Treﮦω w7|xd[F>\%sL[YEQة"2EBxLdqЩ,tWS Tdi#8gyX0f_H25_^m P(,3Pj SY2˪Jhmʍam\' o)$gf|8qp.K|.H~i,"NR{ڬO4zabӁJz>B^]0e$}~,27wViR$ک%Gٍe>zf jaTd#W%s_93M~SA#/$ wd{:@}N;+<8z H.\Ia <Wݣ@rNNu1ɞnFi0 k7pǹs3S Xu&?epa̝dqoж;f<*г3,5dgpknz!џ\$Ukraɜ|zZB!%?8!a0,idTΪ5Aq_\S󪘭ldٳr檰4priZ!;|(E/(}I $ dg(L_%bXSRA( TNH$*t=oŽe"}؜rVr&2SW 0Wbɿ>j?y9{5ШOqgwXHC4}Ou!ŧW)ͪv1Zώ'9V-1xmVzVB14aǪA (Tw41֊gKQ !D+@TcOH$Ng#o {O"o%e32q. 
_&T ;zL?d_B<PEDcz>`r~=PpSOVMok& Ó&zbCTa%PB$߆}SH UE>1X~s\q0@{['1^,1N ona O_6fE^4'o*jiMyq.А%vg+#p3`&X 5_ig) n!ӸDn>Ff嶺z$(އp Xû$V5osr}sEmYf8\I V w`cw__#!T[w-)s%CpޤKn7\6m28K8q\Gh&7YY6nT4KgpR I": ڕ~N#=oѴi=A;̃Ҭ,U\eޯ_ۅ~,OGOi,6-V"|-Y`jg8 TGȫ|XE2DU3ok4.lŬN 跎v#Nܠp$leB 9BgO7|N׍i\+L֏T0OF; ?OIpy_S)_zjs![Tr$n0r(Q+( it7jzەlyOL6}(Pus6fQ_ʙ΅,ͼ&?!wQ(,kp~kmc(Q|",0hVԬEzWԬ ˿V7鬽ە R߁)%a.QmG~5?kT|ͬˍ <E>gOm--dJΐOlNYTUdFz Xj&ݪKKBiٛ}Ʋ㧬%5zȤY߬}[6ɫCत cgs2*lfU7 W칗K{G޳RQt tDRpB2[?QF  f`W v{=# vk G",A8CK.(58x:C01˓8>`#D7a,ŵ18|LbY@ֻ9|i$N:V}#\?v-@a%/-E8̸SmE[,ԜWDbRU[3V0h9J)*Hۦ뻎5 C~hm!h)"0K/e!s8ǧ$`0N6ptayʮ$ <s\/HaŘ1vNd!魸IWp::1 ]e&tL8b^YA3]nTئwN36`K9I=v PUy:{nuclЩwx_8f4YkK*6 -dd|8A^z:GM"H!J]Fʣʮ7`ݧQ^$=92LڲAI_iEhE[ !tϢCY8K1:9xwK :خM9NPimW:w@ѕ5]U{/>¸Z|[ҩ0ėPî^^m< ޵ۓ^TtJ}nѮlXHj"WxV{}Į{prȷۣYO[[D;H1I7cǮJ9X An􏮐ln4"g_@ஓd42a9ljn C.JY1JKS= ~%5!݁]CY4?=Gϓu}înqͬSyY*8P6&2_0z{r:I ?,0bʮwql1eˉp(?EgZג]ǂzKA W/d dؓELi*MAXnGSVӒ46e螮3cn#jg6kܮ\fA8ǯfm1Lo>K-ЯwV3e4E㉽O$2B Nb+b|mD8W=* I{NikPA:(GlJ^凴sBS<GsU7L%1S$NvYN˔#1Qs iu%>;!A=SUunS9 T w#ch0(Uh<XRᇯW)?^%v2GuX~g+_+y-c39ޞDle91үhP><`ݓj-J_m6)X@Rh<4hxʗM+tz2D:'A/k'xr{(O71}i}p}xe/G-P N/2E{į!;7fv[^󝂏c|$}Zk!B7&K[ub;xv~P!dǯ*J_:H ,|'Ӈ.J ߮ \w0< H4SjGjnDsW-L=3l(ө}oLbd({gB^G'=ΥCb.vn2"%?-VӈN}w5\7Vw} 91! -ʿ]K7Xj3{|@6`ugfÂ\T\ .S1Ưw]G]w_6qU<-lvێ] JƑ&wgizQ&Dk/ENZj#Z:`7"U}a}!2D*n"[&9#$ox4z؆}x&5Ұ;?r#90\ذGF#[ kIF>rC ixI u$xY;%(ѰM-77vޓ0WKb`F bNeUX8bqhTg A7Q9.un7#5X2]嚰p~0Ӹn VcNvSQR'sLѰ{A@-rDYII>~k(hGu*U>4M9:'Gf9ue9ڰM&eV)0SC}/Yr+#WO\ 슏T9emֵ-l ]Q~K$䰚kuxha,2PqxFY "7lj{QW?vA{-"IM'$ ֻ^w$&Co. ~lYt`-4 e[VOZzL` ;4oڱ %ci%#bO5o_=w;k<wv*_$Im_%!$LQL'e`{=:-im6濲]k@ 8Q@3;]dD!W\\Gbהּbk5|ۡ@kͨy貖m+А6S4dbN>} sl_ŊjﱉȺ*tIP*$JM!*05oFH<sٽ+;Y$mtQU}+9< I[qWs¯}19~K#ܟ%|"y]Hm{ɛTrAZLJ\Jh@#L w\<l˳&s [EgrrΖ./ ZN2wҐ{yIhXv0޴2 Q޷$򭖭v+($qA|OiJb4Q*\D y*B ҙQr[+4Ѳ #6W8g$=]uvL= &7CDa. n܃ŲKMz|C>:M Ze8nT4Ӭ߆T0'fZJ Jy|3JiŐ0+!q6 \T2pCkgQv|N㲒V`^֤?S vֻO֛?qxV+◺i+a,ʛ%D?tR?2;ьz U4m&6|[F-|0 *c) \[CUM2ұ/Wp!sG8uZcH'0^Բ%Mxɐu߲ tL(۳F۫J0# Ǚsr~X4kޮkƆԦ*8(K-94J1;o[?&cۥ*9"G|?È%%+`@՚ܪ;q=zT6|Ot jcZ[DF|`<X9f3>36n ̳pw7XP -`{6_ð QީW_QUۯZkbn>ԉe'ɅF~ϭQRsn>$G;Fdy5ujx.?WIpγ5uHltϊ{T Cw,KɠjN5~Vvs3eUMP l x"{9_"O}Z3؝Oȿ U$(_=w$v;퓱a18AbjYǚrFErDԋ =u4@[E^{x(icstjΑVM`1E0v;A}$'3Ғ46H [倍iѳJK7t6ÐOMsAB}*=2{4w#SWݹ ^:S.9l$P–uN?>f*(^YcVp#\w~Ъϔu }ɽ3~1Ir *_JʲR {? ζћa(4O.]:~O\Mps",ɠ mMar./%V9a}81At J7(at04&hŴB 8n c^DUX+:{+&N;-5H46Gà2R,QEoڻnztWiW$PѠVz<ڿ`!`M?ڑa .X@5w&63h&Sdu;=lWqD?Yro܈sE5X?&$r->kH@bi!mUhn!=ôT} ovэ;䰴;+Ϯ^n]?4:PfQ\WQִmlhppu8>iZaƧ]YpZnJVx t<W|*^K ״3S \O$Fl 45 ! U`,JF}ŨreiGѴ~8Et1j`djN[ \3]rqbDuTNL.DN9.k;qq]_s*0 zk1HY KAT?b 2 qR.;E5ؑyMMUB<VeFnڧgх@%Yo0:ݵ rJ\!`ER-@H~l\gԹk<Mz Jax=9i;ne><[¤ bҩOtG:`BӚ;w'GZ*;`͆ݺTn(H83e2PzTv<SD*=ġ6X<Ib=~qN0XfH5ޠJz4qtf{-+xo胊F)^|cn5-b _nrü<I[N]0CSbo.]d7aSL&F 68}9=@8};iugKNa2=&{? հ*pm8GKz޵O;kezrĽ E䦵rd|/Z5h$uWZNiBI^ЪA8f93 u=YEBZsDi = lb A[ LhWfdq؏JRRWhﻳGZV^^Ƙy4 (M|a,(7eC|} -RǗ)'O+Ukzq*hm(/"vmBI2lh6FU6<2r Yj3dr2- *B-[-fHwAaѓzrv9 IIDvRǰ;`Nαv GmaVv| O 5k"#J$RbUe8 hrƿ$GݡbPuC JXoh˶H9JEմQ׃'NItIKM :eLXc-fvH]_Č :PySpT,Ey(?ӫO\;tqjfxLދvRÖ0;}G:hzkx`@ؓY8&W*vH9be9n7GꆢcL'>9" 8*vn][]%q׶֍.}B ϭYm`Ȁ0[サ/I$V=oaaYoߠyLV;۶I()C̞|y| r 1{۹ˋӞ& [d'ܷd똓^kgZ:H߰_DT,7TE3yXeXyve+ug_+b/,zM̩vhUwm[&UyUQ1yVa4,s=#^hٷ`pG6u 3(`ho'{TVHmwb*߷ͷ _:$ xS:]{(u-dh˳mkÁ0td% !5"[زXdS>Bs>fhr:\7J'N2)$7,ǢKODdXuewP`<8anV(ydeȽXf;0 gkzjn >jZA,? 
[uͳ YK#ӕH֎'}gf!KQ+.UA& oϒO˶ۣlriaR0 e2c;/i.?8d`amf׷͜zhlE=D^rǷ=P3{M $ôЕ &F$&I'Le&]NC%\Γ&ϭS&hҒbX52 )/$Ԧ.)dm1o-mr8)3<|/2R%ћdb43MA126x-J4U/]/9\=91$مSJt9z*~gWId Ǒ=vdD1gUO0<Ӹe?rg'ċf fc$ x2Q\+ SjC{Hɀ#֙%>#Q+/[ElU_f`ŬIԐ aB?C`cuZHN΍ӣ^sڬIm͹.ݸ!Tf> tH"Oںhȸ)|3E'lE@hʜ9߻~1/E3A+"Om kZ&6,e>/JK.Z_zkH0FO@\(r gTp&Q ep2ZŠ8y,XZ'Ld  :XR1\q(b"ߌMGANek Jŝ9Àafi?>d oOBTk$iFRVoZ}\U/[I67pĭI<ҙEzvU{I~5IɹVX($ѥ45ٗQ"=j llЋ~;Yg')ӉkELBg}YA֍HaČ,9#.ΐsjǶH7`48/ADOqq{ǢU-ݹzCJ>GR/Apz~Dҝ4Ìd#Gi˼9j:=}5Q3hÞ2(J"Sn?ΰ+e#pw]vӹ/~{S܁UAܻ)Q`fbNiksﺖhL5eTq!1 OXe4<;,q^YϹro$N&~2f xmz=K7 #62"SJ-oU1&"ᫌ غ5rK$O:\=:|-L5P$ : rHDs^ԚìtȞ1Ti7$nʯV\f :pN$#|EJtƁAn*y)k2Bfdj'uVبvKA|5$%Y ϓhE <?F ֤?*麾h6ܲuSók7JzɳU2e){驊p@,T.]G~!-p0 ĶX{>39)TSƄ#&Dvuv$rGc3VY/"3_oK斨 ϒϋVx-20C S5 f6v',4 ҏI5s1h8Ҋ 9\^S8Fx^bPT3xgM. =[%6V6 #ۻ@B2_ 1A|MU2Ln!꠻Cb%spco;NSWRV(뒻_F$5A`6eoS;.kbpݳHߓ'r Uncɻqoy@-~Iߥg#Jי!1VY'UVyHhԇd[nvql7l9  [Z6F|4a,] :Dy@r]>oxJ51]^K*$,qL'rfJs%_.G ;%}?^+ "єs= Ժ*xyJ0Fh~9]O&]u%qa-%1/vމG?-ߓ M_$@ L]WA>]xx|OW47׵lKV_͵*?_t{ شaHü*4ݮJs~ļ,}Î3l*SG<MeP}zq?CA?H$)\*b 磻ռLfwL2ŋ>[V>LDƟ~^_.h f:[qBpJ+wR&0E2b#5R eW0&.akibroם 05s)O S> IVۂ+|ʷ2YV6ew9 bPx lY,)쑖 3c)a}pOor]ජF k8@%h(1{꼱βRR9&;(\h owRt刉lΠhf8ֈHZ `yTOlk|}Mk?S) Dqϼ?2+ț]A^%y%m4m=^8H$ɍ z!޷CY1!Ȩ G [)[A1$MI}= * ĿאbѽX =~HҶɋ\M#arO{xFҽj\i`yn` TtO{evFxn~6$]y])hi(+V̮1㽕fJZt$7DDtjBeL=tL4*~0e>8p|q8#8:*Y# 轸y-8l}{s/U~^d*ܼŒE^q@)%~Q]Ws{ӵ|ؿTSmi(#B` v H[SKC +=s$M(XKk9:~T"nC_"(%iv_' <̦-*f{~GH$*q%A~nT"߫Y<7L@p/ϾZ0BvT^mQ@%[6%I?.Lձ^`.%qQ&]4,kAR5Y/yj7}h^|2δL&i`_>@+;0]y![,iA;h7*acdH펾 D 2.x] jvtm=oZA" ^CY,?=Kndj7LC:ʊ#UV;z΃Ai z7p;H(YVqX8/񕯤%^4<I|:ǟ<s*arAξ?I)5þaCI柿xzʾc6$P,SR|aS@β2CY4`S~[\1 #ip嵧N3a[Tg'$[n?. +DX&*9-F*]r  6D}Rz0bxѫ>G/O -rЧG$)˶^2]N2$>[w7Q?&0{}w5^QQE)]cJoVc (`WGK}:gy=h>4kb`'ೌ<ۨ<hާr [:l-wvu+,<,#|hBeD>^_4V6@I3JXfQn4 *3𿘫_bRpAu(+j!u ˎ2,bL.𿢉oNcd 4.ۏ%꿨(a8AP͖L 82*iZDܙ^4 凁̿25!uMTJ꿵<dciZ+gU&"Ԉ!O6!IV]&IbqPa¿b Ks}RsԄ_䧿QK3xbr{ο5{iR!}r0]jV ,ܓuj3Mܿ [{nA ߓ殤5̜K~lp[ΣԞ eyFxf:Ũ ֒F٦:mU)KR?iǴPUv. ծnlk q/XЀ?ޑ,li/2oŧY{l,dF)e5fx)V+ /G7t, 9ж$L|3_$ ԝr7UzSfP#IǪO*^GF(Jƨkv D'#ؙPKkw& 52ruv#z'Q|I va^xh R!!m1C҉Depl7Bxu4zWȓ}429WY`ynd|H#(rKa.YjAŔ*E&>챑M<3 u;S傞XfMheGC\0ߚ @[ ,rypW|J-S .\9-[ BBh[a'qr@x8LbN&1^P)핒Ps'R%HHw;ܤebG%m]y1vbxز2GȒ,g/I6էJ_DXRJ\ ~4ޛyox 0b,YG2H q#OCNZQ86XBi)JjHt)^:<ԝn7,`jB-vԈ( "M^4 H_DRp2[^1woD5 a=S3 d@zek=Q ?dڦ `V^%EZJKQ0dKʥE2 njJ߅RSgN GiPQ7є&Y(s <Z39X[$fi\2̴l ɾb+klw(9ňf)6sDT{6"%Gti 9vwfBXG{*4Aed8}l6J-5b;4JWUǪaCk FI*攽6Ѓ#LpTRz,2P3uT񱟣P9{57:@$VC0#\'R`žAˊ|S#O Fo3fC>;ם$yȺ~+cxut48BtLTfA ꭓKtD| F ӸŊ3z\[ q$g&mɖE{)J#<w;n!搳4&([9~G -ɸ)Xt3Ϙ*r EpONϺ=R;[90% ;&u81wu`" q JŒ&cfhⴞ(v(‘|vOP aM’yy_@/k)˜z6_S[l£JܼR@Zmۣ$7Dُeİd֞+ vٛ?LIҿ2KaS.ܞgQi"" \&:b\ lF8T2& | 59x/;0'C6 ș(Sijk#I 5"{ϕ߅ , [Ek#"p+-&&>J BUEYkՂK^pw_<IXZmFtء+$g#])c16uQ=NE?H׆IyjLcK(yIgp-PFa7Kvqiu||a-puT#aͱc:g,Sp:j.̰2i_sp h6S;1qFm. NXIL7qmCI싼`w%9`(6)|x;H[k렉_ӤËOA?^lh{Õ`UB;v4,gUfB0Ø"f9JrÛx5gET\۴&DmãgYV_ֱhê@||r1Ŧ7< WíAU"~^"<÷Y8r_1 <Xq?Ų'9^}|)ު)~Àc.6{`c:=uĦ x5ύBQ}P%Q5g޳9İ!w6ܶߧe4c= U+ {YyV9/N͒3h*k%贶&&4 #$?&+z |X0 䒸i)&'}Wq1*B**vUlyQetg8nu5#Ā#᳚P$M4ć/ 8oQlX2pĜyk u{oTъ2Ģ%^$4`8}(ziݪĢx1O,ӻĺw*"*{(ļΉ1{ny ^Ew.M`sSruTÌK& ~h¶/#JR x6=Mi@l P-.Ary3= 4&r.pMŋ9Tb"2_7#Nu-Mv̨Y8/o7i5n*m\`WА\3D!*<#piT b)a~#xB/hn_  BE~;A3WxNC{%Vv HK)lNB5^KJ"C P~R#=0嚕W6VfXgΟ[w3?ƓEm4J:L:KB(Ś@KO$O{<šەiUŨz::$tdSyūȍ 3lŸ3htW3Ukgl<b9 )M'FPWK]._!ǘij~,"˪ 9ׁ-^;U3/Q 14F qli8b5`,4^"mtZu \F#ƛr Οߍ}=:6(zTZ}? 'SJ^mUag1Y<׶4xSi`4\o#^F=dn+9SH׉PQFM 2hdDTpc:Z9f)WSb?c&65c+g31f,Xol 1rxq wPTg2skDue$. 
x Y g8YwSl,h=$ ɓ~t%hoGqA_|ƆZ<hWr0aVUƖ{YfEP0/bƘ[9Ot~Kxjƣ2ī]ˬƶM!"d~ƺ)UEfe}%-ޏ;}Je F0Y1rYZ{8ǘЬsx/_C&k13Ģ-1*RЁmR.Ӱ R;zb 'KlkB8&~~` $,,PbpgQ\-8J*NN2n-K5Y4B'pK:144`L*1,` זTA/8%69" &XX9&@'AT?H/'՗!EZ}*HN'3`u|JK`&L lO4ঢc Ew݁TzrCiS`D[W0sHx-Uf jllWJ/MpO2@c&w]ۈFw 7vfͨ*}@H1Hj97r?~@3?9׼~)X{Adžv;u0}`nj -Ys1OaK|ǐw3[@N6ǔ"~1 |)ǭ ('/^ma Ƿijː2O@$4:LTǻ_ˢKuV9!6CǽZ 4w=:ɉG^ PQ*<E4_sWዓ.ykа8N!Ζ(6N&jfCȄ8!Zv|NzkPq;^nöA|`rTtIFρylhKПtWbU mS%}&V$$&1`(7, }^~q`R}Q-)@&g;e0:h=8#|{Ku\XT|'TKP-߻{2 Ug!אܯ)`~vdЉL1&ehLjUu0"m5^sE*&Ooi?9)ثawpUK7]>q?y)=%7ҩl2͐0Ț i hfȣۨwʄly-0xȭ9՗מ6i:ȳ.uNF6`yȺ{QĒ;4Fxt^H54UԈ +r StN܏@EJK\ E|o@ˌ=s_ I>eI5J eX[aB8sUjk" BYT+{ɴ @S ,9n]yقfy MԢy)7& 0*rjҲYx[c "NDYGP(d! *N̸"JVR,.| w/$EyMB+S.wOY?Yi:9eڲ{Q̗`:, % CS=NjRnHT0Iر@ >Jlzkۼ9ugW8|d>è XCS3l o|ZL_Glj:}4n*h`&)}+d=m4 @lS{T9Zqi>AHBɁ@@m?GNɐjz$IɽhɨsxgSɩ4k7YEzKb¸ɰKsv@6 2w'ɰ}eIAL/{?!ɾ&Mlq @beɿ`E@ˇcS NSyJwpKn!CEI0Pۼ b~;4?xܵƘ`P%}o^݇:[ 3P$n݋:xR⯌>I+;/ pհ>x .cA\@:6aJ#ШM Wִnw#5W|K iB^ H^cчdZV_ EX'{F=FBj97GtFS "ft4W9]h[ 5#SkfGIw0Jg'0m;ݡ\I4_~չGĦ~ l4`a0&<{p`--+GÏlƏV,Pf $vqT+rH6yQ9ږA"7Z C(R3E^A?XrE?Ơ^)8uLd9hzBpMRX r9BQ bxS;TVBʲ 3z3&S0|TLUz!3l,]wb2-ʄ yEz? 0ɉʈ^ɭJ(ʋ\ n>o{kabOtʓpcX_:V #2ʚ.xfcCݐ+&ٌʛx6sSB/f ʰALU3?.  ʵA/v-R.󣱼k\GDR~aA[|Ʌuѷ1wULEE% MZ]58z@&kRa޹$w ގ[x!0 >aʅ2Z8IXfI/fd,+4$jāЊ`@R4[$Qh.ݼ1<oH~󶔨yWZ=Ej% PpFS@?ҧW26*.)ҏFf0DJYu.SսS[WŪ |hcI$fhYE3Ttˆ9>-t_Wez-0GEˇKKUkZ CjˑswǴELX>˦X\:4mR.I˧?dѷZ&Ts˫F1ı˽]8'0Ȣ#{-˾rW>V:2QITd34/o~1a3MDy:У0BgS&Jѭ&N5OMImFuܷᷞm4Ru?En6# =IH]4*"6 Pۥ%Ԋ.ޤa~EU6jHǧ,oЌZ]<[)G+QTU9'G NXp(y2%fLc/<"-~= hǎs/&TJ,5-̀75aH,JXẏµoE>R̘#E+kEt,0*ṭb}ʞ/'KzS7c(2g'Q,2xB8xSP|4ˡ9ƥ>QC8yU% ='=$8CeϪR#@O[ w$Pݢ)9NibU IY-}EP )ե!CAN槠 ڢ݊auԄ Gwڂ˾To_;XB,Za2 !l|fV'2%}BPTyr2&W=>lj NldT6w18,PhNP} e/'ڍj~^.h19g/`+ad:f &Ӈ885.+;m\<G Z;g7ܯڹ$,x\tO!=3}E9`F/lD,H͌ZcRXvv:Qr͌B&wƬBn7f͍t+};c͓\CW` KM$͕_Dz48 {͙r["N͞SX<23B\͠{۷A?@,Է1cͤL2kەCͨ$Sgp)gdoZQͺVbRDbKEN]~<+ͼ\zH׼]Bu$ͼbi7] `>#=lj{;rJ䢭{N~qzηHx }\MBj}X16LUJ1mV-OШy2;t71Wb|] ~sf!J> ?'{~-m3 xJ[KǮd_#cv;j!:&jͿ,2gAJd<[72OV7D!Y] :Y|plmN vAȐ2U,MS9HHA+/"{ k8gK+"Ѭ-J&v"zwRϱ"fl>1W㶟8$~"ECWܤcz ,+I*Mۦq/JR.c osaP"oRΈ_{=ipΥplƸ anΰno4U8 F)ο{ (Y3ѶC=$BgJ%/`2\ D;fCIghyoޗ+dWz9Բ$µ_/AؗKBX2sI[,U%/M;t7yjsnRwnes}?HlւlQ O/bu1x#Y8j@"Kp,$0?<T(Z.,x킦c")^q8w4"L1^>p7=Vʒo$l& 7ͤV\!@b:\>YԆ>DImoW`¯!NQW=;"{e孚4> ާ;lS$lwUnQ8ǞrNpiH1(|6< s!(,:<Vt%mȻ-;FhU/y@vO f9Zuσ"TdxzuG:I'ϓFgM5|jܵRϭL͏k\A#ϲn&ۛ!ҙbHfxcӮ<,y>5lAoͯa=UR[)Cv+&t1%@P 2yJv d:ٷqBIN I/ TSVfi_jw:JmypxْHƿW<4N6x(*/;< RR7슚F'x"'B x:QԤZPP`>.Z"Eec&$j*GP l(Y̞AVRtP5M[3ShJ Q[GID?uJ:3]z\]'P P?0hk6@sf| ]|tu."ǹ_Ёz}5cX%TY<Ѕ.,_r2y VIЈK5{_746BУܲQKfO<ޯdмޜH"͠ `!ׂib&גTQi|uL Ey˽ :|}X 67SZ@i5axe*Oۚ Q2UMX,HG WJӳ`p5\<h.@ vw=fzf/21e(4U)RnaO̾!! E醧{":E/V@t~\e]HEgF(CT_߀F$Lvu#gH:Zp:}^dX4{[)")?VFvYezO<uuwы[/!;3і-V.8Pј a0&dlѧ3:Oe๯ѵV81lzkѿ oD)΢w(Ƞ~M2%V?x HBڌ:?&[5;;@Jv]Ro6Y̰9 i/Ÿjy/5A-ْwj0&{yĐV% F@?jcEjN^.k>.F i<[A&4pv"AWq玱6.qJn>RFyHtqYQU $P{ʅS7$\(^MJ\S'e,$] Qe3|ҋ55CoғʸܟgRx6^sMҘYR h9+]`K?g5ҨchQSxMVҰBF!Dʪ+5 Ҹynm@V}3'Dҽ]h3Hl^6 Ķ@il<"H`gD pKsDΦ;Kk;v 7>`X,l+T.SZ~t֖ݔD}X<Y~pLZ xWZouF/zK2gJ<xΕqxn4U w=EՔnI ,SM_<oPZ&پ-fv*ijpNx* {0zQ6geIT!$Gjy}MBr5iKmP%uVݗc[-9GE 9 %ҙ7{aGO37AE2V1JfįiNR~z4 wBFV^sZ GD:cX`bMe>uQM(AiSv6|s!E^-/|JpMف1 Xs:Ȯ,|ٯSSOAo۝zLlu;fU/Әb</ikk`eӞ6(k]h^}GӟMjϖx{I_Jӥi$گtr|Ӳ9Fkl ӿQuv&~b6P-<:Z,3FؑOѼf0MDu ~jH\md( T P;,m/i#Юs$  p(MI'0jĤ&{/4u>>Rں$yp1pQ>&]2E$l|~`ES+} jP< L/sӚokn+ٝ#ǠmԊƈ_ZXX2)Kl+/4;LjJӐ%= 0':5bZBoMu6fV N3?ԼedF_ӲRsɟ"PqS|n۳*.NUfeG?t? 
H^f][2Ƕqvԛ qQ<4ݿĻRԝ׹]f?5wzMGԡfŞ( ʷz03|N`I([X'sFsH6iHPXI{+_h58v)T-Ƙǻy07.sbIY MI0e&,[f7&w (^[60 *MI̘6_ aRSƞt.?0{4 {΁E}B$`ɲXcWӽ?2L%A!9nXÐTѽlH" XOTXL$#^w=h [" 7hB @#O׆{p^gX?3$-.` 3-'Ywi= {o IZPm0=;$>I0"\efWXH6g]%㢍׊ggV^/x+&-ڵgl8Ӡ V\!jkwJO)Ktn77g1#˃Qrx,:  {<χ_h0VjVۢՇإݞN I>ՊYTr_đ{j|ՙp*U|J՜ేjg4\՞4ۻ[#F`D=sաrPjI4զ)~(Hyժp:1+#ng[% մ/)% c_յ6 ykONվG]8k{IlQ%_-Y{o˙튎--׋dJrl{E085C >b :  _K=CcP*/ l+(p$M@ݕccz<IW TّERu|a@6LV :e͍I/>[#CI"m/Oj;ǼfM ,L#=cE&rFVsĔT{pO %$ CXFm"huCUI e!-HS3Ƶ!'e=OO+6usW[49`7"vgo\?U\=}g0 &YEiVs49GB3- LF#YRs? 'UP'ꝷ;Qgo:OsGlW&#UGjEv: A=9ާ#{优$H3qO+B |&X$gYp RևdlX<x3z@֜\?c:_֞ 6 BݡK:9a֨qu߹V֬)rk2'wG¸֯/6.Hr>)ִma 6&<h\ֺ=Qe̲Fh/6AbV]\Z-$|^2.݋ ΋7FOt6 a9.K*Ř=p7 F6Q`na"T4!&zɞ$r KW l'Hi>} ߼F0},u7*9un4mSow?[:RQMPA7`?^-H|PռM TNG5lJ#pMX9-nLЩLa25* /Iyk]v+joy4ۭz.$s{ l׌5$*o2 ז 8@$z mם~5y'.o.,pR;ץG6&Pwntbץa6YO_-צ1Ȕse0&Ċ̓׫\ZY`|EкZB04%%D>q yѫ^KغQYI:0:ՅLzq)2<O>kUsAKBjfcVum#3b(r_0`$*E1 6p.j">Mf X\B\PliE#YSk4x#/hCaP,eREpF//]Y!֥d2ɼ(si 6s^8%<x3ʼn ? n8+^T_V["Bk~JDuQlLѷ)Ԭe*y=]HM\'7)s=$^)O z;9`79@bޫmpqZcg3baIkؐ;́X5HؒZIr&ܺ5Uؚ RHmYNت|'ΦF*΄n;uخv Z}rخigv@hKط|[om.#ظ7qʖiʫO>Oы2v2 {81>%78aSlYlEJ䱬}D^vGutk7G Z:;H''D1 Fg#Hظ %8KW +~_ՄX* L)z$eh _rI賶PT_¢n~b~ (lKwI#!VpjJ#,]j3nc$0"zl1c?sv27i+0>Z=1FfQuɵz09So4N~&;:o5VI&~]C&%S`(ip!Xs!lY8KEC`\"Ӵu S`iFUy{-!=ն~{*GcG@ s,`Wـ*̛˜٫e0فb) ޶?cy/Xه~D`x %\̍ I ٌ1Ӳ^e{6v[dV&&Nκ=4:z\c]j MBI@8bݮ(2>t9y@G3qOPL-/L83-c]/MvIྀk9Yj@ɥ?)7bU6 nCHTZMDP7lĂyMr&hߤ|036Hvi2sTQ%\3u VV0Yɓr?ބ`#X8"w%]0NAT9"xgu),\Gdh=OnZC;냲nƷf h%ڄJGjw`ej{nڔ`38<&3w)i2IڙR$JnWI LmGڥfqa_ڥX{6XLڭ~D^MHAڲ+坂Z;YrBڶ5Pgp x{F\ڹջxwЏ"rھop6ѝz?'3tQus۶BJBa4ՔLK9Tj zN̓S,G6g#?O䧮 l62=48ZrFK57"_Ҧj_hLpa b&/8;T4b }Jv,\< l:͝Z=`Cx}/sUDA 7pĻ.G=d3*"ZY< <vq%Zu8 5?b*#WA 6Wt6;`PH,'a>@+|9VEDʰ'KцIWY(5٠n2M;n(*R>d"> W LCXW`GH[l ٽHy] ԍ^hY%i ԛt@ G#y8yC.l~NT'"ۃ>@4kŒh7_"۝._  >aal~<۪Wnv3L۫H^,V[bLۿ-`ȭ{!Dtk ѣ<PJ[Vj;ˊqGu:&>_2s;)oNaJ'$;EyTykU}󷼛G `ɾe77 [NvRb2$rO^P*dy{GXLĚ\f<hӸ"f%g86ZKqRQ] Fq[^њQS6yv$-B\c qK`haШeUjE2geK`r Zu()vYWģra8Aj/Ves:ol2-m|3$ ܁ol%EINܕ`./dVEp3sܬcJFy0d=ܱ஗?sXVܶ1JϰL%H7:ܽ~T.9;VàܽWsXŭW@,# >-O ]ɯƐ+Bg3M޶WM/~uPL+ca 8npM7t"JՒ?_!O'7 ov(J^O+-Y XȵջqrnB4KەT& .jAQ{M͞fiċ? hd^nx*<}Ŧ2\*B/7`z hIr8|,¿i2s,BF?(=8+goU"^v,&9m)T>|ZNNWX2/!Ul.{*|V!< &6j9fAV4id+=XyʲK R])2!̇}3e<s6݌,@s.NL5.ݍ>+Ő2_.0[ݏ)0`Y=^ [ݔ]u#ߪIWw@Hݕj@B]2ZLݗ)QtLwv5'6ݬ35erݮdK*" ݮ_NL]{DGݳ*/j*x="fBݺP3 Ay%ݿJvƫn ea BZ7lm$ ?gk]=wݕ:N -lҏT^]ŝ7<5]oFĴH쮀 i#Iō>60,̊y|ng Vzag '(BRH0yunZ7'ӺHe ZZ`]{4 #991&`MGV/t'ͧ,ɪB㐍tN)oFgo޻ {*&g Gx՛N,I;a739Y9.$3},HyWbtC;27sb_D!.gey?+x=ڄhu'SLS^_|qUV1PЉe;g*k\(#ka6w$8Yq]3{<1s!Lc3`W!/M /L!.A6a<QD`nX-w0at/~`seΔ'Z&KQrk 45H:H/qn(0'9+ˍ곀~c(nhY>Uބgd~,R&Os׼BޒbF=S<[zOl}ޖ(wCJQ<xޙ\ݰp ۪ߋ\,ޙrm, n O ޝ@ NV5Tޤj^jDqaޫ0ٳb Y\_тƾVf ަB-XQ&͹&DutS hL%"$98'PjlIݤB2D'zenXTin1H#@s*~b.k̪=bbt!xEF4J\*OBgS >\~q(r[kV%[} 5AV"MkJM.RӁ5$Ԩ/[v-TGan^Pųt$_Vcp NH)EL[ G$ˬ` \.;]N= ]B,Ĩ:߀j`"SpB rD-$ߐ땵@x2|; qߤ)0۝ugυ߫<:KsϭR*&y)}߱睢gO&W3͇߷55r,9߹x78. B|a#߻~F+`ERKw@߿VĢ/YJq'U'. i]T҃|N +$ `T5@7RV~xiy 4߁yELjpj(ȣY pSM8vCx0Z-Lpz̮̈"8+!/AP6\ Jtr Qͳe*~?f:1<rqhr]Qń4H@y?+Zk~'^APcl Y,H-BVF#ƭfjT棦Z[pJD ͐kwh9PBYiy1HVhcVKW&fd6 \Ugg6R$z !CrMKǟ;@ȹ&s.)ZR"u_TuozfC̲g^;J)af>:NWe-QfvN9WRЧf_7e|{|H̓XFԒ[oT"Pj"{1u:zkMM0!{)aH2G+mGu`˭ .PjC 'ǖ 4K9!V**n#z5-|+ ;(79,oY-&/rZe5%YӚ87&K ۾&s??J :VfCͩSXTt DA@ F<EWd|H. 
H@a!4:)#1 TP0fd St- 9 vn_ z쁛gM} )<MU<`S"2I-&A0͠//胧aᐑLz3Ԕ+o)E'B3m7HbsGllR?SVkᙩb7-"bf^-ᙺi ՠ%IQi( "9c>:| qJAzXdJ4#:lAe=$a%N_ᶒp3u~|?+%2ʄh7䢈l:Z mBR?Mn:XB1} b 27\n"cӞ;y"_j.?ႨzGe|k&h2(y!jx&7(=CCAjc/cps su4܍b( f~]J"9b%jBA %(<CbpմzLMoոm$h% A`Z' ^^.@BX$cD`4>{Ggv^7.C9m*=#w%gE_H_mm=+9 O}#ڵ?YHɘtOn7=FTyN^w4O▥g db=)Rih◙asN)Zʐ-0Oz,/tnSI\qQG 35ۥGgA`⣨.ЛHl]E1ǻBbUmA&5%+O@u[ŐXEtI>Y6|#ǣ⵹gam|F#dmC}~歶`歮#A",>C \gd㓰eCua{ KD7*N]7SC/b{QEٞךzEZ)fP堎a=H9C?ͤn/rB3J>Џc2/Z )Vtp5α+sx E?}J|:T_V |,4l5)rpKGq~l`:"H,"i"F{hSq3Q?:̓`-IC9n R}(ؔhLD7j.}7HL[j=jhR/ OHQ& YHǣŖNQ'\UBA}Q6gCdWo* >8BwxȞTXxѧ}<$mȓ\^ (gP–^BuYTD4QC_UBL7V ![`E.#&&{3 AQPaΟ[ յlSf2d:% ϩS#gȤVr."Amm1ƪ;pIIv ӵ>H1ػptㅣEjL;B!4<t5)Hժn0i,|R,~\ Z㗔]uLHD*,&,㤛-/sK)A~㤱[j 3(27 ܙ+$'XZ+4Ȫ0ft&FB/Eސ3:y(@:㴹Ws9V::(߾až!]NHDnGwa}w(=/ɪhgW=9h;p-~LFw^bqq+[ Đ>t)GhERdӿ%4C:be{:ClS,!a.e[q,#S|0۰LM iɝhdKNBZ_mlr<lva w򨸷N9aBbFTSS,ğ+unGee.+2 ?K".hTu%LPy#9xKJep:H&ChP 'TB:BҶV#"MI8rɄ8oiMӇ%VS4<XU M)P,H͋?+bnzNeX Ktl]8c"ZQ n{dϗҮ\q*hE/~ V?IY~yxzlw.^@-:PA2䇚E)c/[^d΄th,.y(dg 0!Oϳ8;諾ќL0B (2C 䖎&R{<<!A\ډ$^ O&h]5䗷Y e6s*pڗ5.V y_,i'K+L[[o:6b$䮧-3%*͘wЦrZw䮿? CTuchtY:hpɧ# iGʼn6&#l1)*ZJ$_skVݭئ>YNWtJSZ1˄i3/Z hL< 6jAVvì8(mȅNm[za($̷~ֳ9@8ƥ2 ĠB&ɞx_j]2et+(/vS!85 [.0@^ƅC/N f 2'R0 $`\ɊP9By6mІQ:Fs<J*Oy4!>+ruUի 5Ah’gL jGض'Ue05'?JRQ '%*5Hf;(1 ?jIbl]#PNbII(2Go@>[$ĶO倗o k@LMB0n嗪Ewu+_Ԥ*pgQ`I&0y=}~dd_b6ⴴ6U嬎d:ds\-LC3g+ "Ѝ8q5c4bGE-釥ka@vLx8idC'#ڂV<@;>,IFnnJ#|ZC\ ͌HHgx\lMD !E@8QIܙKV}1Ԩ , w/<esƾ.̝04G tHpuAU4'/u.m hR]\p8 h[K8Kr8~&(JO|5ddS*nrx!B/[x$aAq8?H8) c[~hs@@}1mАn,P{P@}!KPg5,7S9&T 5_tG2Zє @^)S(fG~{Y?+zιgmb.,`Q‡'A`l{5{wx`t@15-|d]aJQ(rr}~J _ mz慎{;Q <,v-"W)nwF 4YR6&D[;PO%)2p$Dh⛲CK)wZS柣CU@*.+M[X.V, (洀ߚ,,ESS2_滀(w>F9Z`ss+ ײfg5aO2h臅g/Z+4,)_ю] 9^HNFA: aPa9:%ޢXpn=,>ۨ$at;<'Nkޯ-z`. ^Ɵ $ >T{EŹ?ָؐ[0>hW7 mw\7*6v: } hC3?80Z {69NНKb4E?)Na i4?/'$Z/.WСF[՛m=Y$0  ȅ&v`$|ha_z 81DzηhchtrhA^m͔=:ʣa"uNDSwd'1u"`a$-T0%Ҩ҄0^Bs2Bz\ҭK ex \S@բ|~׆绩 ֽw *lzcqb # l|_^ /^Ŭ|cr'ɀ՟Fut+̩z9xܘ+Fn9iKL.]ki!%֞:6u$>-z&-̗57a&$.:e%NSwnk%1*'OfX:$ܸC0Wth|`B"YoM%bzaa\KP.4L*5>d̓0˶z!VG͹ ܾs< hpgYbQ4#S3/~TCE#欆+$s R`Vd8$"'vm8o)=\A."!t)Jq=,q4GžrB[/g8PvKW uê/8BLSfm@/~fWB\v'Y ʮ<htuVwo E4iNz<" ?iXbA/Q脠YAW.VS @U3wk>v:Gy1i%Ԏy,}^#`%+uz@r>Nwplʍ>Y >,ˠ+z~h[ $K=Y^跰Т:{St)`RXn-H' dl']`aFgЇԘ*nd<LIkc];Y\M҇Qv]LWD")*t4<e4Q4Y1x 82hS;&+e^N!r5Ɲ[I3"%/&mRXᄬ=nC,Q$q[:݄]MidM>6>fl ԩuY%VYp󭩺07x屢#žQMl1Y*afjPMi.kњX$$) a[ILZ1OAYxo` (9b)5Lo =d 9VØs5DB"?fQD続 ;'@( 8+>܈ wF $J+@oϦ{sҖQ@Ml %:%Tm{СѪ"˳UJ;8`>ZEէM{V>h oQ:)"Z<K*$ޯF%_ rw_X?(iֲc'V޲{J2a.i ZUsxHYSsjGZe/Ysm BoXV3I@2,97uK x)Iכc8xK;éWm;y鏋oߧɬ9%9P:BFNDH ;%MF+`gD0 qXIF}akM$-yDzyHzVUKO!y!(|`bcVȘ=QɷEE;Yd_<ϋC>U#' 4o)^UZ<Ou76k7{lg˗7exU$H.{ UfMI{Z zDR9޵*^YޘlCDik Ѷ9c0=8.mӖvcX޿TW7Kwpӹp` zbs+ldzV{&)zGw Q:z=)0m dX!Jnl,t4\< $yg6꛼`"e\~Y&'wF6ECA}/-:<5,-X!Usn&B韍+`)̐` E0}8!He>ASXmcܺ*z'Z$_ՙ0~KQxswEPz-4d8d %tRd8&jI($$-RT>XzMG8գ/Jk,*fc vq#|[ 8#% hx]hxCef )* HVhsQbEtaE4)aa"SHz&ܧkQ +F*39.39}ej*W$x=_6ʊɯ' R~nqwNLfU?[D}ظH~#ZPX00ye:zV܊%p`B]FeNZWY_`hփW[܆` UMbh0|{#ޮƧikSKvZ$󕠋tR j4<2x0@ϑUx(vۃlJX p,{ׅ=lkWll{g[/*:X\?3M:ژOE'[R۳CfRs`{`2 " 1'W6=jI$"gʿcfHrPڮb>`<Ql;o8k僋b4뭯qEO%@7<zv볍^5,#LyX@獫&awщs8Y"\qtA@2%_APi5~Ҽsǥo6E(@i#f<sx ("E0 y#%?iEQ"Uӓ=Y_) 5~ (#*w :\oBN`8]Bp*62[퀮_FU D¾R7&rb5jvfjF%$diUJP\9UF,0H 0F^0?4GWDKCΌNhQ9vmXP1i%WR~1C+ħXG}<Գ̫*>R]([̹]z۱:w-!m[n$m ^ &.t;%_u!Q%!&Fl{3ٲ>aգg6IuT:UO@N삦KJ ZzR%]h BdK3lo}>o49F\e1 싢B6=WzS9v\#8ﲄSq:\CEZ'!5] Zc}鴾Zt^-һԬqhFE{M8KOYh)~b}ȬnTilg_7WZ\SWxɉs)ZovaZ2s@14f#u'Y`Y@R-峝 Zb|u]auCV9VdqgK<S IɝӃjP>Wpd1Dīf<* I `?R %P'BS,~z }HӎmD\չVn!F<x!‰e uhs8ݿ0XaIY KK ѵô#iIjE_VROl5oaa4eF,:nLzF`TZ&|zRI][V8.Ukד?BX]/2͗wxiE\.{D# twk˗4PxMqs\b'U/ rB30a:s Yr%8^KFG9W GZouZJJ%gv *Tf@"ڇ)^C홺m-8H?#-BsIhiUmPXQP-ȫ3!SYҝ 5N`.? 
e]Ԕ>kq:>TLaډS@}sۗ|QLN?tX6h9$&pi\Y|NW`|,Mp@ "^] BW -ԦOkq 㳜t>4l]>jG9Զ СyJx X.`!O/q%-SrQ1$ΣQT3Yfopg1i*:䰒k-EHt^5$DR-&%=OL%:ot,|I[nM=j1Eu_E>sn5<K3R=r[ZK:QW~ݟ n\ѿ4[XV.1Q$i  X{5#$]ef:Ѕ2;k:+K Uln&s~V'rnXuAhzo  E`$ Wxhõ8fd6gT2=Oc5*Бip,q/>HL}Qb{ks4 :~8gڳVHT5+|h;D1[M/QsT#CqAE1dqex!͑o-mbk^gԷمOWDϾq3Ir _<>2]}bjޱ@(:+=iz yA UʽJe2"#'[]gB _%7rqf-(dU#'4ٵGP9JfDQajpLb M9wo{xX!(S$C "9CXF\x ,UaOquh7OK._A컄jNvr"x5!%˕S y }5$؂pƉT%D:+^Z*+Zk9f90+#FkGqlQѹm<WGcL?Hb=9 !"lì1v`qJ~c8l;C<?"ؑ]5&'タ<#q'W{iDe  :)AG r;e*d  Be)`.:CbO #37][=sռmw֍i{tsGݱlTfvmy4? pS3%*xappȴ\.azWc6ƚ7ZD'$2 6zŐԞ"eJsBxU 76\ A;J+I?~>7 )!<MzG1g['~~~PH) 0<>|8\i>GB =6CpR ;B3P†m&QgDrĹ8AWyt^S5RӣwpO:Gl`Vw@OK4(1:|!j5a^&WLEŴ~8!zCmzm wY>d-I"-Id.KW 5#Z5 D~MT nmܹ[fܠ=گo:P'j!"τ!A:PHފ1|m԰4ę[c8d˴T$R%ljS7v-v>Icer }&AX -d7D P-V/1+!;֥3!>chB^U֠;Z̬O/KǍ@n`{8MBLRRܨ]pFG2R0@ej :P"BXZ ]tB&dͦlQZH{-pՎ<ޡןV˕#rJF[UppZHHd2ݖMlm ȳB <r5T^~w̜1W# CÊi5"@ӃU呭I]ϥxqmn{E:.OY"a񶖵e(cPqrT^+:f؝TN_#)̪GPB(P`O}sXwJE+i.,Wkl58ݢŇ̨#}f<v i'"jRUd~ O>T:Fݗh?{_wΖ&+2ߨvT?&7;$0QPNj $3V4d K~kQbeѷdAJӁpsGFOJK܄[Ks&Ǵ}Ƕ/[AE9 (m=N{5F.i>I<a?F0Ahj21;S]jF8i=|Q 5bhQZJF2ox̪f2 Xٞ>évg x6 g1CbgdJ[%of$֠q{=L70 ,w0<5e*H]⇅0mf`GKm_]zjDtM(sĬ^-@nsA.;7!򠷤s$r5W b!M41hg4`Ee~^Y :.VKq<آ/ytjW,׹bs g.l~<G đO،ߠQ/Y"lWcq{/ ~+y eձ+ylty\1G;mZJ̅ܚX[h##3gp3N?t:QdW_m_d\M~<c}N<ߗ`${m6N<voUM/ytVh!TOd$kԬ׍:Tm/g wbKj ;iC2Anhbòܻ M_.*+uy)9U 0YAN~ZdN0J`o[UU HM jusضXڡ&1\:.:GB^ _DzjΥ;Afp,m>9Q2IJ+mMٱqf8頛*rBhޠ}HPOtL3`R33gdt?0'ʤX6k>FZ>:ΐ3 Y$FuQ5S^>q~j#A:_FеM"(_tB,󽐲Fh DN0syZъU'o΅U&TcɪӍ˕6u|d?ewSVMD@t^EZu59rB,1L'^z>jjgh^G ҇]&O(,)-1ʕx!f|8+4qg 2TX^1 ōHo2%xj~4ջ2&Nض~Ta8KΥ=cˑj_; &s;#pFRh4fd5䰼󄟄lIk782~laxVeyhY%KGͳhѷuԎ۞kb΂(;&\>m~'9eEʰl":I#LLz+Fj:LBjfEMRZR`*hNm {CGC[lL&rdJB 5-BݔWUUCQ bx=<=3ʟdTIi*g" -E#iP^࠯5/Zi7Gk{@-u LҠ>X㧶) Mϊd%$0Th/ʯ1gE~[awEBzJMK#gmW+z#XT|7]q̔d^J*/nv_F;.0t}A-E;kj]?D<^v?05Svʮr[6Y<pȜWDM8Nd6xkJT7j)=ȾZ cKW*jˬ}~S^AD NlOr'8^?rksrI}tl>5#uoD; \*BCkw<Q'nE[W&pIyx­a)"K7>(|A.MM/ܲQ]+y][ImlGy;b|{4J'2Ac ZA0*"^ "q_$gU{om  ['5GfFX!Ii4 ?UUa_Lg?o7+(%p'OL9t4idIDMn1M?HX&-7A7 / i\UQZjk<Hlc:<uw[ujB.3" inp(j%XkXٲ1LjptłRp@:uB8dcNpօB28XCJlXCL!QF@'͒ NT8F;'6\N(b>`&#?݋X< b)!jas%dWUbəd"i gFQpəXP=slkU¡ ?k y<@xfT^^ՌG(RyKDs99NJ gid]׏Nj{<Ao)Q%WM33/O`JV`05O=%AŠE=̄Rdޟ3N'a{1Y;(]- m:QZHRI+ި!g'"gy*^_=S+.Ȉ$(~w~ղ+ ׇ~R&ym|<$.dpw륱f9  &a*[2"٤% Σۜ\~2Qt\ΗSwQE MԓyE'|ϔguQg6\ k +R+V8|6M¦ίQ@LooK'"i]nfIxjU(Q^jwLJCC1-!Y~NVrq=]=FuÇ'b6M#5dT5ŒoK1jb~ ՠ-zcS;Bvk!)~'v m7k%"ܢp"A_Yv^7%CІd/9Gf=>L"xrZMz+FܰEv!|K KlD[Ry"/F Y2$}j!ly3n"K o)ֹ3`EWzO6X3`?gkXk'K `3w>2u9K`dKgCz GI%nH-SWeL]@H/=;n e"O1su&\-#wpM#cMa9a#OAJ3JT!>\ri|MCݙx㸋AQK]a S ƙvR(ˆsT-q(NdX8~+%u~ CK{TXv]l*1fŽ-fPgW5j,O%IlDyO7M3Aa)ͮ4UӤG-oO` %6ٮ X.9mٲyɾAfLo/Zi 4uಲ>3p~W`h aWn85S{  W@Hwe4+zYZӢ]`f!:/8xةt8N:igK'un]= LϯB㝚2 -.ǟ!󩣛 'P-0~oXS$|}81|!&Vň<\796ˬgB}'Ӎn;:8`ؒ6c4*0: k$OHl;ߢzRNtf7RԱnhhb+W$B-_:yo3t:aY<F[P}M jcar-ך}k , co ZLlPOYVR-i͸`~b#:2kMBEp\ȹ y_ torJʹs<|+;aI)J + ?K' -+2?%/eMkF"hյ$~c;*$J.=mzI  Ca5Kxg/'k 3i\l!hh9ݐoPvxF((Yn]Jd^Ng" ԡ4C5jל3±'?!h@Er2,R]+W2jÎ=M8R~CeMw(ޔG:L+ 'Y0֥Gi?GyxY!afkIm#]aJ_'%z.<~sa~>QRIXVia7ƾJ>Qj(4P"@:oG0;B]+ DG̡;X.~OELگfV< I#p^&f5Rx,C3s)؃R ccx/PBW" :tjX!ؖ&TY6R䛗~%'x62\b#f$ քQz{WFg(}1$ͳV ;TĈy<lEZ)0`O/$t`,v6<osPlZ*,]ke̋JKLj~=_/EbT\ʼBA {vc83{LfI(8_#!A!$Gn'tebsWƷb ]&w9ߒ4~x fěeA-&qՒsh8a^d N487)=:1s./ xKҧ=-BP3 Dw%g73K|ՙr"3ߴV7]F#wV޽ ??8R5F-*-|ƼH,&= +]~hyѤSPLq6hrH;굯Z:fb` T U҉į5wz_tٺ,@VzIkn,^%ld2 l^wӥ=>+NO mUtZ$rE1{P=I;us vGRh.џ Y > =0&3F,عgP lc^~%.2Ʉ*q#g 1 Tgo"k?f(^OSޮ}8V+n=<]*+> y_%Xorl\Gl`5AY4=wKFAcUPS)7T6 K7[֩pD^[pq- QxMCPVcC3Yd{#7ő?rBWτOi, ΒNg}$lHI3ȜPԯ{R6F/ׇwYe$yF`<#o( {u|/ d忦6]J8֤狲+Y~Dkg$""7ea]*lĐj|TnV5YUd*س[ DcVC) RaUQ$bTkVJ 7qJ>uynȒ+ofx665mԽ<93 6Y-WKNf:t4vb}d 5¹MBՖD>Z4耏v'?(+Nrz=KO6{OL` yTa%0xMVU4u6 zsc\`h17d<!f/5iz7_҉n$`kĜ2=#.oc>Pb<58 t=6s8%xhWsb جcyg 
x Y g8YwSl,h=$ ɓ~t%hoGqA_|ƆZ<hWr0aVUƖ{YfEP0/bƘ[9Ot~Kxjƣ2ī]ˬƶM!"d~ƺ)UEfe}%-ޏ;}Je F0Y1rYZ{8ǘЬsx/_C&k13Ģ-1*RЁmR.Ӱ R;zb 'KlkB8&~~` $,,PbpgQ\-8J*NN2n-K5Y4B'pK:144`L*1,` זTA/8%69" &XX9&@'AT?H/'՗!EZ}*HN'3`u|JK`&L lO4ঢc Ew݁TzrCiS`D[W0sHx-Uf jllWJ/MpO2@c&w]ۈFw 7vfͨ*}@H1Hj97r?~@3?9׼~)X{Adžv;u0}`nj -Ys1OaK|ǐw3[@N6ǔ"~1 |)ǭ ('/^ma Ƿijː2O@$4:LTǻ_ˢKuV9!6CǽZ 4w=:ɉG^ PQ*<E4_sWዓ.ykа8N!Ζ(6N&jfCȄ8!Zv|NzkPq;^nöA|`rTtIFρylhKПtWbU mS%}&V$$&1`(7, }^~q`R}Q-)@&g;e0:h=8#|{Ku\XT|'TKP-߻{2 Ug!אܯ)`~vdЉL1&ehLjUu0"m5^sE*&Ooi?9)ثawpUK7]>q?y)=%7ҩl2͐0Ț i hfȣۨwʄly-0xȭ9՗מ6i:ȳ.uNF6`yȺ{QĒ;4Fxt^H54UԈ +r StN܏@EJK\ E|o@ˌ=s_ I>eI5J eX[aB8sUjk" BYT+{ɴ @S ,9n]yقfy MԢy)7& 0*rjҲYx[c "NDYGP(d! *N̸"JVR,.| w/$EyMB+S.wOY?Yi:9eڲ{Q̗`:, % CS=NjRnHT0Iر@ >Jlzkۼ9ugW8|d>è XCS3l o|ZL_Glj:}4n*h`&)}+d=m4 @lS{T9Zqi>AHBɁ@@m?GNɐjz$IɽhɨsxgSɩ4k7YEzKb¸ɰKsv@6 2w'ɰ}eIAL/{?!ɾ&Mlq @beɿ`E@ˇcS NSyJwpKn!CEI0Pۼ b~;4?xܵƘ`P%}o^݇:[ 3P$n݋:xR⯌>I+;/ pհ>x .cA\@:6aJ#ШM Wִnw#5W|K iB^ H^cчdZV_ EX'{F=FBj97GtFS "ft4W9]h[ 5#SkfGIw0Jg'0m;ݡ\I4_~չGĦ~ l4`a0&<{p`--+GÏlƏV,Pf $vqT+rH6yQ9ږA"7Z C(R3E^A?XrE?Ơ^)8uLd9hzBpMRX r9BQ bxS;TVBʲ 3z3&S0|TLUz!3l,]wb2-ʄ yEz? 0ɉʈ^ɭJ(ʋ\ n>o{kabOtʓpcX_:V #2ʚ.xfcCݐ+&ٌʛx6sSB/f ʰALU3?.  ʵA/v-R.󣱼k\GDR~aA[|Ʌuѷ1wULEE% MZ]58z@&kRa޹$w ގ[x!0 >aʅ2Z8IXfI/fd,+4$jāЊ`@R4[$Qh.ݼ1<oH~󶔨yWZ=Ej% PpFS@?ҧW26*.)ҏFf0DJYu.SսS[WŪ |hcI$fhYE3Ttˆ9>-t_Wez-0GEˇKKUkZ CjˑswǴELX>˦X\:4mR.I˧?dѷZ&Ts˫F1ı˽]8'0Ȣ#{-˾rW>V:2QITd34/o~1a3MDy:У0BgS&Jѭ&N5OMImFuܷᷞm4Ru?En6# =IH]4*"6 Pۥ%Ԋ.ޤa~EU6jHǧ,oЌZ]<[)G+QTU9'G NXp(y2%fLc/<"-~= hǎs/&TJ,5-̀75aH,JXẏµoE>R̘#E+kEt,0*ṭb}ʞ/'KzS7c(2g'Q,2xB8xSP|4ˡ9ƥ>QC8yU% ='=$8CeϪR#@O[ w$Pݢ)9NibU IY-}EP )ե!CAN槠 ڢ݊auԄ Gwڂ˾To_;XB,Za2 !l|fV'2%}BPTyr2&W=>lj NldT6w18,PhNP} e/'ڍj~^.h19g/`+ad:f &Ӈ885.+;m\<G Z;g7ܯڹ$,x\tO!=3}E9`F/lD,H͌ZcRXvv:Qr͌B&wƬBn7f͍t+};c͓\CW` KM$͕_Dz48 {͙r["N͞SX<23B\͠{۷A?@,Է1cͤL2kەCͨ$Sgp)gdoZQͺVbRDbKEN]~<+ͼ\zH׼]Bu$ͼbi7] `>#=lj{;rJ䢭{N~qzηHx }\MBj}X16LUJ1mV-OШy2;t71Wb|] ~sf!J> ?'{~-m3 xJ[KǮd_#cv;j!:&jͿ,2gAJd<[72OV7D!Y] :Y|plmN vAȐ2U,MS9HHA+/"{ k8gK+"Ѭ-J&v"zwRϱ"fl>1W㶟8$~"ECWܤcz ,+I*Mۦq/JR.c osaP"oRΈ_{=ipΥplƸ anΰno4U8 F)ο{ (Y3ѶC=$BgJ%/`2\ D;fCIghyoޗ+dWz9Բ$µ_/AؗKBX2sI[,U%/M;t7yjsnRwnes}?HlւlQ O/bu1x#Y8j@"Kp,$0?<T(Z.,x킦c")^q8w4"L1^>p7=Vʒo$l& 7ͤV\!@b:\>YԆ>DImoW`¯!NQW=;"{e孚4> ާ;lS$lwUnQ8ǞrNpiH1(|6< s!(,:<Vt%mȻ-;FhU/y@vO f9Zuσ"TdxzuG:I'ϓFgM5|jܵRϭL͏k\A#ϲn&ۛ!ҙbHfxcӮ<,y>5lAoͯa=UR[)Cv+&t1%@P 2yJv d:ٷqBIN I/ TSVfi_jw:JmypxْHƿW<4N6x(*/;< RR7슚F'x"'B x:QԤZPP`>.Z"Eec&$j*GP l(Y̞AVRtP5M[3ShJ Q[GID?uJ:3]z\]'P P?0hk6@sf| ]|tu."ǹ_Ёz}5cX%TY<Ѕ.,_r2y VIЈK5{_746BУܲQKfO<ޯdмޜH"͠ `!ׂib&גTQi|uL Ey˽ :|}X 67SZ@i5axe*Oۚ Q2UMX,HG WJӳ`p5\<h.@ vw=fzf/21e(4U)RnaO̾!! E醧{":E/V@t~\e]HEgF(CT_߀F$Lvu#gH:Zp:}^dX4{[)")?VFvYezO<uuwы[/!;3і-V.8Pј a0&dlѧ3:Oe๯ѵV81lzkѿ oD)΢w(Ƞ~M2%V?x HBڌ:?&[5;;@Jv]Ro6Y̰9 i/Ÿjy/5A-ْwj0&{yĐV% F@?jcEjN^.k>.F i<[A&4pv"AWq玱6.qJn>RFyHtqYQU $P{ʅS7$\(^MJ\S'e,$] Qe3|ҋ55CoғʸܟgRx6^sMҘYR h9+]`K?g5ҨchQSxMVҰBF!Dʪ+5 Ҹynm@V}3'Dҽ]h3Hl^6 Ķ@il<"H`gD pKsDΦ;Kk;v 7>`X,l+T.SZ~t֖ݔD}X<Y~pLZ xWZouF/zK2gJ<xΕqxn4U w=EՔnI ,SM_<oPZ&پ-fv*ijpNx* {0zQ6geIT!$Gjy}MBr5iKmP%uVݗc[-9GE 9 %ҙ7{aGO37AE2V1JfįiNR~z4 wBFV^sZ GD:cX`bMe>uQM(AiSv6|s!E^-/|JpMف1 Xs:Ȯ,|ٯSSOAo۝zLlu;fU/Әb</ikk`eӞ6(k]h^}GӟMjϖx{I_Jӥi$گtr|Ӳ9Fkl ӿQuv&~b6P-<:Z,3FؑOѼf0MDu ~jH\md( T P;,m/i#Юs$  p(MI'0jĤ&{/4u>>Rں$yp1pQ>&]2E$l|~`ES+} jP< L/sӚokn+ٝ#ǠmԊƈ_ZXX2)Kl+/4;LjJӐ%= 0':5bZBoMu6fV N3?ԼedF_ӲRsɟ"PqS|n۳*.NUfeG?t? 
H^f][2Ƕqvԛ qQ<4ݿĻRԝ׹]f?5wzMGԡfŞ( ʷz03|N`I([X'sFsH6iHPXI{+_h58v)T-Ƙǻy07.sbIY MI0e&,[f7&w (^[60 *MI̘6_ aRSƞt.?0{4 {΁E}B$`ɲXcWӽ?2L%A!9nXÐTѽlH" XOTXL$#^w=h [" 7hB @#O׆{p^gX?3$-.` 3-'Ywi= {o IZPm0=;$>I0"\efWXH6g]%㢍׊ggV^/x+&-ڵgl8Ӡ V\!jkwJO)Ktn77g1#˃Qrx,:  {<χ_h0VjVۢՇإݞN I>ՊYTr_đ{j|ՙp*U|J՜ేjg4\՞4ۻ[#F`D=sաrPjI4զ)~(Hyժp:1+#ng[% մ/)% c_յ6 ykONվG]8k{IlQ%_-Y{o˙튎--׋dJrl{E085C >b :  _K=CcP*/ l+(p$M@ݕccz<IW TّERu|a@6LV :e͍I/>[#CI"m/Oj;ǼfM ,L#=cE&rFVsĔT{pO %$ CXFm"huCUI e!-HS3Ƶ!'e=OO+6usW[49`7"vgo\?U\=}g0 &YEiVs49GB3- LF#YRs? 'UP'ꝷ;Qgo:OsGlW&#UGjEv: A=9ާ#{优$H3qO+B |&X$gYp RևdlX<x3z@֜\?c:_֞ 6 BݡK:9a֨qu߹V֬)rk2'wG¸֯/6.Hr>)ִma 6&<h\ֺ=Qe̲Fh/6AbV]\Z-$|^2.݋ ΋7FOt6 a9.K*Ř=p7 F6Q`na"T4!&zɞ$r KW l'Hi>} ߼F0},u7*9un4mSow?[:RQMPA7`?^-H|PռM TNG5lJ#pMX9-nLЩLa25* /Iyk]v+joy4ۭz.$s{ l׌5$*o2 ז 8@$z mם~5y'.o.,pR;ץG6&Pwntbץa6YO_-צ1Ȕse0&Ċ̓׫\ZY`|EкZB04%%D>q yѫ^KغQYI:0:ՅLzq)2<O>kUsAKBjfcVum#3b(r_0`$*E1 6p.j">Mf X\B\PliE#YSk4x#/hCaP,eREpF//]Y!֥d2ɼ(si 6s^8%<x3ʼn ? n8+^T_V["Bk~JDuQlLѷ)Ԭe*y=]HM\'7)s=$^)O z;9`79@bޫmpqZcg3baIkؐ;́X5HؒZIr&ܺ5Uؚ RHmYNت|'ΦF*΄n;uخv Z}rخigv@hKط|[om.#ظ7qʖiʫO>Oы2v2 {81>%78aSlYlEJ䱬}D^vGutk7G Z:;H''D1 Fg#Hظ %8KW +~_ՄX* L)z$eh _rI賶PT_¢n~b~ (lKwI#!VpjJ#,]j3nc$0"zl1c?sv27i+0>Z=1FfQuɵz09So4N~&;:o5VI&~]C&%S`(ip!Xs!lY8KEC`\"Ӵu S`iFUy{-!=ն~{*GcG@ s,`Wـ*̛˜٫e0فb) ޶?cy/Xه~D`x %\̍ I ٌ1Ӳ^e{6v[dV&&Nκ=4:z\c]j MBI@8bݮ(2>t9y@G3qOPL-/L83-c]/MvIྀk9Yj@ɥ?)7bU6 nCHTZMDP7lĂyMr&hߤ|036Hvi2sTQ%\3u VV0Yɓr?ބ`#X8"w%]0NAT9"xgu),\Gdh=OnZC;냲nƷf h%ڄJGjw`ej{nڔ`38<&3w)i2IڙR$JnWI LmGڥfqa_ڥX{6XLڭ~D^MHAڲ+坂Z;YrBڶ5Pgp x{F\ڹջxwЏ"rھop6ѝz?'3tQus۶BJBa4ՔLK9Tj zN̓S,G6g#?O䧮 l62=48ZrFK57"_Ҧj_hLpa b&/8;T4b }Jv,\< l:͝Z=`Cx}/sUDA 7pĻ.G=d3*"ZY< <vq%Zu8 5?b*#WA 6Wt6;`PH,'a>@+|9VEDʰ'KцIWY(5٠n2M;n(*R>d"> W LCXW`GH[l ٽHy] ԍ^hY%i ԛt@ G#y8yC.l~NT'"ۃ>@4kŒh7_"۝._  >aal~<۪Wnv3L۫H^,V[bLۿ-`ȭ{!Dtk ѣ<PJ[Vj;ˊqGu:&>_2s;)oNaJ'$;EyTykU}󷼛G `ɾe77 [NvRb2$rO^P*dy{GXLĚ\f<hӸ"f%g86ZKqRQ] Fq[^њQS6yv$-B\c qK`haШeUjE2geK`r Zu()vYWģra8Aj/Ves:ol2-m|3$ ܁ol%EINܕ`./dVEp3sܬcJFy0d=ܱ஗?sXVܶ1JϰL%H7:ܽ~T.9;VàܽWsXŭW@,# >-O ]ɯƐ+Bg3M޶WM/~uPL+ca 8npM7t"JՒ?_!O'7 ov(J^O+-Y XȵջqrnB4KەT& .jAQ{M͞fiċ? hd^nx*<}Ŧ2\*B/7`z hIr8|,¿i2s,BF?(=8+goU"^v,&9m)T>|ZNNWX2/!Ul.{*|V!< &6j9fAV4id+=XyʲK R])2!̇}3e<s6݌,@s.NL5.ݍ>+Ő2_.0[ݏ)0`Y=^ [ݔ]u#ߪIWw@Hݕj@B]2ZLݗ)QtLwv5'6ݬ35erݮdK*" ݮ_NL]{DGݳ*/j*x="fBݺP3 Ay%ݿJvƫn ea BZ7lm$ ?gk]=wݕ:N -lҏT^]ŝ7<5]oFĴH쮀 i#Iō>60,̊y|ng Vzag '(BRH0yunZ7'ӺHe ZZ`]{4 #991&`MGV/t'ͧ,ɪB㐍tN)oFgo޻ {*&g Gx՛N,I;a739Y9.$3},HyWbtC;27sb_D!.gey?+x=ڄhu'SLS^_|qUV1PЉe;g*k\(#ka6w$8Yq]3{<1s!Lc3`W!/M /L!.A6a<QD`nX-w0at/~`seΔ'Z&KQrk 45H:H/qn(0'9+ˍ곀~c(nhY>Uބgd~,R&Os׼BޒbF=S<[zOl}ޖ(wCJQ<xޙ\ݰp ۪ߋ\,ޙrm, n O ޝ@ NV5Tޤj^jDqaޫ0ٳb Y\_тƾVf ަB-XQ&͹&DutS hL%"$98'PjlIݤB2D'zenXTin1H#@s*~b.k̪=bbt!xEF4J\*OBgS >\~q(r[kV%[} 5AV"MkJM.RӁ5$Ԩ/[v-TGan^Pųt$_Vcp NH)EL[ G$ˬ` \.;]N= ]B,Ĩ:߀j`"SpB rD-$ߐ땵@x2|; qߤ)0۝ugυ߫<:KsϭR*&y)}߱睢gO&W3͇߷55r,9߹x78. B|a#߻~F+`ERKw@߿VĢ/YJq'U'. i]T҃|N +$ `T5@7RV~xiy 4߁yELjpj(ȣY pSM8vCx0Z-Lpz̮̈"8+!/AP6\ Jtr Qͳe*~?f:1<rqhr]Qń4H@y?+Zk~'^APcl Y,H-BVF#ƭfjT棦Z[pJD ͐kwh9PBYiy1HVhcVKW&fd6 \Ugg6R$z !CrMKǟ;@ȹ&s.)ZR"u_TuozfC̲g^;J)af>:NWe-QfvN9WRЧf_7e|{|H̓XFԒ[oT"Pj"{1u:zkMM0!{)aH2G+mGu`˭ .PjC 'ǖ 4K9!V**n#z5-|+ ;(79,oY-&/rZe5%YӚ87&K ۾&s??J :VfCͩSXTt DA@ F<EWd|H. 
H@a!4:)#1 TP0fd St- 9 vn_ z쁛gM} )<MU<`S"2I-&A0͠//胧aᐑLz3Ԕ+o)E'B3m7HbsGllR?SVkᙩb7-"bf^-ᙺi ՠ%IQi( "9c>:| qJAzXdJ4#:lAe=$a%N_ᶒp3u~|?+%2ʄh7䢈l:Z mBR?Mn:XB1} b 27\n"cӞ;y"_j.?ႨzGe|k&h2(y!jx&7(=CCAjc/cps su4܍b( f~]J"9b%jBA %(<CbpմzLMoոm$h% A`Z' ^^.@BX$cD`4>{Ggv^7.C9m*=#w%gE_H_mm=+9 O}#ڵ?YHɘtOn7=FTyN^w4O▥g db=)Rih◙asN)Zʐ-0Oz,/tnSI\qQG 35ۥGgA`⣨.ЛHl]E1ǻBbUmA&5%+O@u[ŐXEtI>Y6|#ǣ⵹gam|F#dmC}~歶`歮#A",>C \gd㓰eCua{ KD7*N]7SC/b{QEٞךzEZ)fP堎a=H9C?ͤn/rB3J>Џc2/Z )Vtp5α+sx E?}J|:T_V |,4l5)rpKGq~l`:"H,"i"F{hSq3Q?:̓`-IC9n R}(ؔhLD7j.}7HL[j=jhR/ OHQ& YHǣŖNQ'\UBA}Q6gCdWo* >8BwxȞTXxѧ}<$mȓ\^ (gP–^BuYTD4QC_UBL7V ![`E.#&&{3 AQPaΟ[ յlSf2d:% ϩS#gȤVr."Amm1ƪ;pIIv ӵ>H1ػptㅣEjL;B!4<t5)Hժn0i,|R,~\ Z㗔]uLHD*,&,㤛-/sK)A~㤱[j 3(27 ܙ+$'XZ+4Ȫ0ft&FB/Eސ3:y(@:㴹Ws9V::(߾až!]NHDnGwa}w(=/ɪhgW=9h;p-~LFw^bqq+[ Đ>t)GhERdӿ%4C:be{:ClS,!a.e[q,#S|0۰LM iɝhdKNBZ_mlr<lva w򨸷N9aBbFTSS,ğ+unGee.+2 ?K".hTu%LPy#9xKJep:H&ChP 'TB:BҶV#"MI8rɄ8oiMӇ%VS4<XU M)P,H͋?+bnzNeX Ktl]8c"ZQ n{dϗҮ\q*hE/~ V?IY~yxzlw.^@-:PA2䇚E)c/[^d΄th,.y(dg 0!Oϳ8;諾ќL0B (2C 䖎&R{<<!A\ډ$^ O&h]5䗷Y e6s*pڗ5.V y_,i'K+L[[o:6b$䮧-3%*͘wЦrZw䮿? CTuchtY:hpɧ# iGʼn6&#l1)*ZJ$_skVݭئ>YNWtJSZ1˄i3/Z hL< 6jAVvì8(mȅNm[za($̷~ֳ9@8ƥ2 ĠB&ɞx_j]2et+(/vS!85 [.0@^ƅC/N f 2'R0 $`\ɊP9By6mІQ:Fs<J*Oy4!>+ruUի 5Ah’gL jGض'Ue05'?JRQ '%*5Hf;(1 ?jIbl]#PNbII(2Go@>[$ĶO倗o k@LMB0n嗪Ewu+_Ԥ*pgQ`I&0y=}~dd_b6ⴴ6U嬎d:ds\-LC3g+ "Ѝ8q5c4bGE-釥ka@vLx8idC'#ڂV<@;>,IFnnJ#|ZC\ ͌HHgx\lMD !E@8QIܙKV}1Ԩ , w/<esƾ.̝04G tHpuAU4'/u.m hR]\p8 h[K8Kr8~&(JO|5ddS*nrx!B/[x$aAq8?H8) c[~hs@@}1mАn,P{P@}!KPg5,7S9&T 5_tG2Zє @^)S(fG~{Y?+zιgmb.,`Q‡'A`l{5{wx`t@15-|d]aJQ(rr}~J _ mz慎{;Q <,v-"W)nwF 4YR6&D[;PO%)2p$Dh⛲CK)wZS柣CU@*.+M[X.V, (洀ߚ,,ESS2_滀(w>F9Z`ss+ ײfg5aO2h臅g/Z+4,)_ю] 9^HNFA: aPa9:%ޢXpn=,>ۨ$at;<'Nkޯ-z`. ^Ɵ $ >T{EŹ?ָؐ[0>hW7 mw\7*6v: } hC3?80Z {69NНKb4E?)Na i4?/'$Z/.WСF[՛m=Y$0  ȅ&v`$|ha_z 81DzηhchtrhA^m͔=:ʣa"uNDSwd'1u"`a$-T0%Ҩ҄0^Bs2Bz\ҭK ex \S@բ|~׆绩 ֽw *lzcqb # l|_^ /^Ŭ|cr'ɀ՟Fut+̩z9xܘ+Fn9iKL.]ki!%֞:6u$>-z&-̗57a&$.:e%NSwnk%1*'OfX:$ܸC0Wth|`B"YoM%bzaa\KP.4L*5>d̓0˶z!VG͹ ܾs< hpgYbQ4#S3/~TCE#欆+$s R`Vd8$"'vm8o)=\A."!t)Jq=,q4GžrB[/g8PvKW uê/8BLSfm@/~fWB\v'Y ʮ<htuVwo E4iNz<" ?iXbA/Q脠YAW.VS @U3wk>v:Gy1i%Ԏy,}^#`%+uz@r>Nwplʍ>Y >,ˠ+z~h[ $K=Y^跰Т:{St)`RXn-H' dl']`aFgЇԘ*nd<LIkc];Y\M҇Qv]LWD")*t4<e4Q4Y1x 82hS;&+e^N!r5Ɲ[I3"%/&mRXᄬ=nC,Q$q[:݄]MidM>6>fl ԩuY%VYp󭩺07x屢#žQMl1Y*afjPMi.kњX$$) a[ILZ1OAYxo` (9b)5Lo =d 9VØs5DB"?fQD続 ;'@( 8+>܈ wF $J+@oϦ{sҖQ@Ml %:%Tm{СѪ"˳UJ;8`>ZEէM{V>h oQ:)"Z<K*$ޯF%_ rw_X?(iֲc'V޲{J2a.i ZUsxHYSsjGZe/Ysm BoXV3I@2,97uK x)Iכc8xK;éWm;y鏋oߧɬ9%9P:BFNDH ;%MF+`gD0 qXIF}akM$-yDzyHzVUKO!y!(|`bcVȘ=QɷEE;Yd_<ϋC>U#' 4o)^UZ<Ou76k7{lg˗7exU$H.{ UfMI{Z zDR9޵*^YޘlCDik Ѷ9c0=8.mӖvcX޿TW7Kwpӹp` zbs+ldzV{&)zGw Q:z=)0m dX!Jnl,t4\< $yg6꛼`"e\~Y&'wF6ECA}/-:<5,-X!Usn&B韍+`)̐` E0}8!He>ASXmcܺ*z'Z$_ՙ0~KQxswEPz-4d8d %tRd8&jI($$-RT>XzMG8գ/Jk,*fc vq#|[ 8#% hx]hxCef )* HVhsQbEtaE4)aa"SHz&ܧkQ +F*39.39}ej*W$x=_6ʊɯ' R~nqwNLfU?[D}ظH~#ZPX00ye:zV܊%p`B]FeNZWY_`hփW[܆` UMbh0|{#ޮƧikSKvZ$󕠋tR j4<2x0@ϑUx(vۃlJX p,{ׅ=lkWll{g[/*:X\?3M:ژOE'[R۳CfRs`{`2 " 1'W6=jI$"gʿcfHrPڮb>`<Ql;o8k僋b4뭯qEO%@7<zv볍^5,#LyX@獫&awщs8Y"\qtA@2%_APi5~Ҽsǥo6E(@i#f<sx ("E0 y#%?iEQ"Uӓ=Y_) 5~ (#*w :\oBN`8]Bp*62[퀮_FU D¾R7&rb5jvfjF%$diUJP\9UF,0H 0F^0?4GWDKCΌNhQ9vmXP1i%WR~1C+ħXG}<Գ̫*>R]([̹]z۱:w-!m[n$m ^ &.t;%_u!Q%!&Fl{3ٲ>aգg6IuT:UO@N삦KJ ZzR%]h BdK3lo}>o49F\e1 싢B6=WzS9v\#8ﲄSq:\CEZ'!5] Zc}鴾Zt^-һԬqhFE{M8KOYh)~b}ȬnTilg_7WZ\SWxɉs)ZovaZ2s@14f#u'Y`Y@R-峝 Zb|u]auCV9VdqgK<S IɝӃjP>Wpd1Dīf<* I `?R %P'BS,~z }HӎmD\չVn!F<x!‰e uhs8ݿ0XaIY KK ѵô#iIjE_VROl5oaa4eF,:nLzF`TZ&|zRI][V8.Ukד?BX]/2͗wxiE\.{D# twk˗4PxMqs\b'U/ rB30a:s Yr%8^KFG9W GZouZJJ%gv *Tf@"ڇ)^C홺m-8H?#-BsIhiUmPXQP-ȫ3!SYҝ 5N`.? 
e]Ԕ>kq:>TLaډS@}sۗ|QLN?tX6h9$&pi\Y|NW`|,Mp@ "^] BW -ԦOkq 㳜t>4l]>jG9Զ СyJx X.`!O/q%-SrQ1$ΣQT3Yfopg1i*:䰒k-EHt^5$DR-&%=OL%:ot,|I[nM=j1Eu_E>sn5<K3R=r[ZK:QW~ݟ n\ѿ4[XV.1Q$i  X{5#$]ef:Ѕ2;k:+K Uln&s~V'rnXuAhzo  E`$ Wxhõ8fd6gT2=Oc5*Бip,q/>HL}Qb{ks4 :~8gڳVHT5+|h;D1[M/QsT#CqAE1dqex!͑o-mbk^gԷمOWDϾq3Ir _<>2]}bjޱ@(:+=iz yA UʽJe2"#'[]gB _%7rqf-(dU#'4ٵGP9JfDQajpLb M9wo{xX!(S$C "9CXF\x ,UaOquh7OK._A컄jNvr"x5!%˕S y }5$؂pƉT%D:+^Z*+Zk9f90+#FkGqlQѹm<WGcL?Hb=9 !"lì1v`qJ~c8l;C<?"ؑ]5&'タ<#q'W{iDe  :)AG r;e*d  Be)`.:CbO #37][=sռmw֍i{tsGݱlTfvmy4? pS3%*xappȴ\.azWc6ƚ7ZD'$2 6zŐԞ"eJsBxU 76\ A;J+I?~>7 )!<MzG1g['~~~PH) 0<>|8\i>GB =6CpR ;B3P†m&QgDrĹ8AWyt^S5RӣwpO:Gl`Vw@OK4(1:|!j5a^&WLEŴ~8!zCmzm wY>d-I"-Id.KW 5#Z5 D~MT nmܹ[fܠ=گo:P'j!"τ!A:PHފ1|m԰4ę[c8d˴T$R%ljS7v-v>Icer }&AX -d7D P-V/1+!;֥3!>chB^U֠;Z̬O/KǍ@n`{8MBLRRܨ]pFG2R0@ej :P"BXZ ]tB&dͦlQZH{-pՎ<ޡןV˕#rJF[UppZHHd2ݖMlm ȳB <r5T^~w̜1W# CÊi5"@ӃU呭I]ϥxqmn{E:.OY"a񶖵e(cPqrT^+:f؝TN_#)̪GPB(P`O}sXwJE+i.,Wkl58ݢŇ̨#}f<v i'"jRUd~ O>T:Fݗh?{_wΖ&+2ߨvT?&7;$0QPNj $3V4d K~kQbeѷdAJӁpsGFOJK܄[Ks&Ǵ}Ƕ/[AE9 (m=N{5F.i>I<a?F0Ahj21;S]jF8i=|Q 5bhQZJF2ox̪f2 Xٞ>évg x6 g1CbgdJ[%of$֠q{=L70 ,w0<5e*H]⇅0mf`GKm_]zjDtM(sĬ^-@nsA.;7!򠷤s$r5W b!M41hg4`Ee~^Y :.VKq<آ/ytjW,׹bs g.l~<G đO،ߠQ/Y"lWcq{/ ~+y eձ+ylty\1G;mZJ̅ܚX[h##3gp3N?t:QdW_m_d\M~<c}N<ߗ`${m6N<voUM/ytVh!TOd$kԬ׍:Tm/g wbKj ;iC2Anhbòܻ M_.*+uy)9U 0YAN~ZdN0J`o[UU HM jusضXڡ&1\:.:GB^ _DzjΥ;Afp,m>9Q2IJ+mMٱqf8頛*rBhޠ}HPOtL3`R33gdt?0'ʤX6k>FZ>:ΐ3 Y$FuQ5S^>q~j#A:_FеM"(_tB,󽐲Fh DN0syZъU'o΅U&TcɪӍ˕6u|d?ewSVMD@t^EZu59rB,1L'^z>jjgh^G ҇]&O(,)-1ʕx!f|8+4qg 2TX^1 ōHo2%xj~4ջ2&Nض~Ta8KΥ=cˑj_; &s;#pFRh4fd5䰼󄟄lIk782~laxVeyhY%KGͳhѷuԎ۞kb΂(;&\>m~'9eEʰl":I#LLz+Fj:LBjfEMRZR`*hNm {CGC[lL&rdJB 5-BݔWUUCQ bx=<=3ʟdTIi*g" -E#iP^࠯5/Zi7Gk{@-u LҠ>X㧶) Mϊd%$0Th/ʯ1gE~[awEBzJMK#gmW+z#XT|7]q̔d^J*/nv_F;.0t}A-E;kj]?D<^v?05Svʮr[6Y<pȜWDM8Nd6xkJT7j)=ȾZ cKW*jˬ}~S^AD NlOr'8^?rksrI}tl>5#uoD; \*BCkw<Q'nE[W&pIyx­a)"K7>(|A.MM/ܲQ]+y][ImlGy;b|{4J'2Ac ZA0*"^ "q_$gU{om  ['5GfFX!Ii4 ?UUa_Lg?o7+(%p'OL9t4idIDMn1M?HX&-7A7 / i\UQZjk<Hlc:<uw[ujB.3" inp(j%XkXٲ1LjptłRp@:uB8dcNpօB28XCJlXCL!QF@'͒ NT8F;'6\N(b>`&#?݋X< b)!jas%dWUbəd"i gFQpəXP=slkU¡ ?k y<@xfT^^ՌG(RyKDs99NJ gid]׏Nj{<Ao)Q%WM33/O`JV`05O=%AŠE=̄Rdޟ3N'a{1Y;(]- m:QZHRI+ި!g'"gy*^_=S+.Ȉ$(~w~ղ+ ׇ~R&ym|<$.dpw륱f9  &a*[2"٤% Σۜ\~2Qt\ΗSwQE MԓyE'|ϔguQg6\ k +R+V8|6M¦ίQ@LooK'"i]nfIxjU(Q^jwLJCC1-!Y~NVrq=]=FuÇ'b6M#5dT5ŒoK1jb~ ՠ-zcS;Bvk!)~'v m7k%"ܢp"A_Yv^7%CІd/9Gf=>L"xrZMz+FܰEv!|K KlD[Ry"/F Y2$}j!ly3n"K o)ֹ3`EWzO6X3`?gkXk'K `3w>2u9K`dKgCz GI%nH-SWeL]@H/=;n e"O1su&\-#wpM#cMa9a#OAJ3JT!>\ri|MCݙx㸋AQK]a S ƙvR(ˆsT-q(NdX8~+%u~ CK{TXv]l*1fŽ-fPgW5j,O%IlDyO7M3Aa)ͮ4UӤG-oO` %6ٮ X.9mٲyɾAfLo/Zi 4uಲ>3p~W`h aWn85S{  W@Hwe4+zYZӢ]`f!:/8xةt8N:igK'un]= LϯB㝚2 -.ǟ!󩣛 'P-0~oXS$|}81|!&Vň<\796ˬgB}'Ӎn;:8`ؒ6c4*0: k$OHl;ߢzRNtf7RԱnhhb+W$B-_:yo3t:aY<F[P}M jcar-ך}k , co ZLlPOYVR-i͸`~b#:2kMBEp\ȹ y_ torJʹs<|+;aI)J + ?K' -+2?%/eMkF"hյ$~c;*$J.=mzI  Ca5Kxg/'k 3i\l!hh9ݐoPvxF((Yn]Jd^Ng" ԡ4C5jל3±'?!h@Er2,R]+W2jÎ=M8R~CeMw(ޔG:L+ 'Y0֥Gi?GyxY!afkIm#]aJ_'%z.<~sa~>QRIXVia7ƾJ>Qj(4P"@:oG0;B]+ DG̡;X.~OELگfV< I#p^&f5Rx,C3s)؃R ccx/PBW" :tjX!ؖ&TY6R䛗~%'x62\b#f$ քQz{WFg(}1$ͳV ;TĈy<lEZ)0`O/$t`,v6<osPlZ*,]ke̋JKLj~=_/EbT\ʼBA {vc83{LfI(8_#!A!$Gn'tebsWƷb ]&w9ߒ4~x fěeA-&qՒsh8a^d N487)=:1s./ xKҧ=-BP3 Dw%g73K|ՙr"3ߴV7]F#wV޽ ??8R5F-*-|ƼH,&= +]~hyѤSPLq6hrH;굯Z:fb` T U҉į5wz_tٺ,@VzIkn,^%ld2 l^wӥ=>+NO mUtZ$rE1{P=I;us vGRh.џ Y > =0&3F,عgP lc^~%.2Ʉ*q#g 1 Tgo"k?f(^OSޮ}8V+n=<]*+> y_%Xorl\Gl`5AY4=wKFAcUPS)7T6 K7[֩pD^[pq- QxMCPVcC3Yd{#7ő?rBWτOi, ΒNg}$lHI3ȜPԯ{R6F/ׇwYe$yF`<#o( {u|/ d忦6]J8֤狲+Y~Dkg$""7ea]*lĐj|TnV5YUd*س[ DcVC) RaUQ$bTkVJ 7qJ>uynȒ+ofx665mԽ<93 6Y-WKNf:t4vb}d 5¹MBՖD>Z4耏v'?(+Nrz=KO6{OL` yTa%0xMVU4u6 zsc\`h17d<!f/5iz7_҉n$`kĜ2=#.oc>Pb<58 t=6s8%xhWsb جcyg 
vj|8\X#t[ JME:[5! 5 rdqZQIYѤ3S̕;Ѓ;B_${r{1B:.'l)-2x!e.1§NjZbuQ]k+Q Mx%NHz]ƺmho]% S570ßV|f(͞'ޥ<12R=KߥmCAHa%mrE)-Pӳܛ]TOHI`k7 TJ1 ΑJۄШӈ}Dg$JvUGOLҖfiU=pü h OˡtC}cC-Z\tM0E<6vPȵ2|Q j z3 HcuMn ~~jdtq3-/<2*"n#E(Tcn -ϛoX&ˆV9lSP88aq*D/v@D+x_^NZ ܓV]qoT,ܲp= #== ]bAކEٍ~RkS<P!u%?Ml'3&!"t.@2d?);ҕY<T&ԋvHq9A+& tO-Vs9ht]Q*)pU8sc0H/^6s UJ17C o X 20C6ǧ1 ~cczQt*|l/o@>"Y׽cClY"Ջ[fE`j,,Ԝ[aVBYUbct=F3,K) NPN_}i+6S$*wIh@Gw"FҤmH#M%@@~J1aoz%E&&]P/e3Eu`WMn-]Sk Joc MG!\BfaBu` }{9R&OP~Lj-hˊ̦W-_G,WiED' h>7l)ٕ n ;c;>8(ܙ;" #KO{نuZ)Q7i1Vuh`Vgu3݉4F/-IiJPeXL6+mxe; ɢ ƨ)^jpL$Yo WeۛG405䫁*>0wx, ߣvOcO1;9YodLE}y^=al8aMU; 9;#ӢVx󤓈٪IϝC[V!cw/Yu周=Lx/8(2-z`wNFcP*qs}Ŧ|ӐP9!~Pxf~BD4P\>6'I(<_ AXh(KudL"I.s4t[7z[YPqW?Ff}fR5aGT_R4vK֨gYl%ү=[Eӌ3=_R ՞9Aw.6̏ifq ,=8"PwQfH;F ݕ\W5V;҈STT s. <f"G]Ēd*ϔ&Aig׭ bZwCm)|O0`s~!N>!Rպ`v1'o0+N<f_VyN*&eF`T*L?)ٺDMQ\B Ct)nFwwdrP˿`pԾ]!Rfuv{yq6Wv_H;(@׆9R#<Yߞݖɭ ")#.2Հ0B#rF~/B͸mp'9s{5Ŷ`5vmX`^ҭC75*sl<>ꖯ mBAf5ٶ5$ibzmhm׋H @TRl ;9,RyvgeR;V]y_Q#Jf!@~1D<#RZسA*I:KK;=k uG;\]ґcQOBLS4Qe8kfjCn[ØHqfef1jf0h`gd`\()#f5#\O{VCT?Ýx% c.<C_V0;ҢróY3GJzY0PS Z=#bī/`mlTR3Gd7eSgPBA8Ro,g}3+躻h(pL\<%q5ZĶџW7J.HSM#Y@q Մx`L|ʖ6L0zsy w)F<<TV5L2B 5d6ܵ$? bﰪû!;]kP^ێ~)1,G&`jƴ%eBoh(A*w=㱶Ya>AOnzVs6]؟r![({(z.>g ˑk S^ʼuOWOZ[F5 V>\>Bof.% f7j{ǷOڵl頤iǟHBWl תJ<(E%_x:ydǴʑד1xOYQ =qpfH@XҘ(J(&<(}iŽ D?_2!EDa^Aŗ7j(&dx'!Q8N=Uˊb<0'zĞ WL`:OZA8 $D2|CeQɤq20C\IvÎcRhPOM2r UC85g >DkQ1*Z58Ff*l }/Eܤn1`'FYCnrF`3̉Xu'x ,ќ$}8T;q銝oW'A1$#u"7ƿ1_gCoǀ+q#y`8ǶڨQQ/sX1qN8}qg9FLN0WDC*ҤA+ԲfQ@d}@:<c #ea*BK]&e( 4S<T(S?)i8zsMF[bT֠W퀷x 6u9!4,b;Pۥ37!7g%Lls]sK+H5H˶ yaan|<<xaЮl32#h2˳|#c&YڹJ4Ng\J!础u.C{wʮOfu; T]n3J)04q!- ɣ_V7D!:gنIw])B󭾺,n)#~l}zNE;?=6Mj8?O{)E6kk%0";TRbk ɨy.9:WG߁8Dns%|5\CFoϣT!"qf ˩z`ڠ=^jz4Y/#X&c[WÎ'm_= VL+ 0~.Zե~7j,DP4b̼M6V]Iϵ|CGߪ(ɚ*Wj&8v_!b^mNs,.0S!PZtElc7>-K&dE.-Ku?р 4Ud1E'BU!cd)p>q.J+)Ƚ݇4PV<GOV~ו̧/@wjsA9pʻBVT9`j&#Ajj>bHb&!*)^Ռ5O,yYQ&'Sc;ߞO$qƷq`}O\c:dQ+se x^h<>26gV_ n>M/1aU-B9bx 0,%}Gճ]|>dD6gUPI L0m;Ul++ɼd" 3$HcM nj1I 5I ΁+dW 0Vck*}r; !^GyYD$Fu-dܮu;$$S{#X졫??IzС)j$>c""7uM zlh~@=Lh wGY%NѤvI)=QA_N@y' ‰+ [?ṭ)u̧sL}eGKs.].KcEIŎmC1%|`$fS¥aieKi+ [A<+E8b i L. p$K,'dޚ;&~苼4O%~M"/ $_OJٝq.EՅc!dsQdlA ȬF&\?zN?hC"+wƺ:j^;I{Jr:'oP| 8Zqt\%Oim<Kq tTeu]<^+W=rŃx̵&VФB%#bbS-p,09DZ.kOVי2ЃP;62I0 Cg<CIVnl(# HZWRp9@[{ oHCPX}#gz, H2PkZ#8zfze_6/i ‚f+|#)T3^R5ZMaleN/ ΢\,1;!*.^rqNu0Xi/iO]Urzr 5 hg'V`"1 rH*^[9 1B)B{$ utIm3zkb"TW4 #Wf񌟖 /h*bJ.YyeZ UH1& 5x W5c&Jz|@pXÕd+hu] H-g) #Dmf d9 ¤M4ܑQ:4:i_Y72^yH5&~iG T-CIDfz$x(k+#H$O[B\TppjO]mv*TGʺԚ5O W§^rQŮ1؂&1 n W5hT Hn* q Rц"Tbtl?nYE3N3 Szl 'zB^OsB;h;H @KV:og%cHM:ЉJkJ%S^%'kvDDla*V3oFD{S.r+"wCs>ގٻ?I<6 HhX nO;ӥͼe 'I:]BFEAm[P^4⻹tNʍ \G8=Cb/c;HcT.EJHP/zs$Bc90˓?D `i{(1DUX? n`>Ud>𸜠-$Bo\\rҊ`7r: o(ȓ)L (g LCG eZ hºKox F| ;[uΤ \u߻%[UcYIZOf EA@#>Tra—@ʒ]W(_ 7|tJvhx.m2/E?*$^s4ȴhb;[kd<O78L)W,`Xhj}%5ER@6. N .9lu"4PO7ZNo),z &u1N3%0V- -׽ 0^O324UFJv0K(i$c0@hP\__H>9g?|+nbHd.xfDS!5$#P7SS>;bR{†9wQGʂSqImWR>1Wm+م(Yu6^h0N<x+Q~Z|") 9y}]ZnI}8L=2kúɋhdIބQ=|Y/t4]Ō¼tR4 *kx5ə pne=F1I.9X=yHrHr ÞqkpRzytxYv*sw0W6bbVCnayU" ?r폒IC*R \y]}~PkkU_qzno$h廎CvjQ t_q_9[)nSL0$ +eu|/!h~\;}K_^+o@fXM%r΂(sS#T1 ^yPѪ\"rynTy+)DGϼ2k&Λg>\xV%qOz gyhPHX|Sxܚ#E$CO1`Xgm-In m{t^Ҫ؟-l`K6]ZC!UO^ Ԕa?8/}UgH=$( %uUk2`2fGUdձ-:<x\`U^"bMV2pK*Ao2 0~8BgFgA#|N6{tn {a4tGj83){vQ^i! 
v5UV p0οs: 9s~>EHōigsN!<@S:xt <E0ps0[1z8]X͵;>h6|hd!"n$Vʍα6.?V-ow9V>hNf@9ز00<S$_ YN|[j![R Z.E=Az(!uQ@Ч hgC<nf/7<7} *dcBgu20`mDҼ8p~]~a*>rDf_אY/R1#^Aˉ\sQT|VPUDF0s@)+ݠ&b_ jpG=>]-Sa굫s%4H ;!Fa؟Vh+/BIdDy tH˒tOY6:- ewa_?*Ę9,zXmI\h_H C <cr59Yd9AMOƯ ވsWeJ}tMF|~wX3z,=I$jA[ԅ EeG>Fie͓B "8*,–DDuu+V|/$/L?%&{ш3^g';8B&Y 4LSpY)۝v<ˆ%NbՐc7ߛ)i^;n:/l: & `C oz Go 0iN?1W)p׌,+4[C9"Tw q`\˚0\ș]FcTNJ<!&q)y+"=q=w?P ݕ+XnzFلw!pN&g>Q%#WN)H}o?ሞTZ @6t$FO&3Y$G+cWdhdQ\ #&5SLi2`pW,<3EHx"4_/+}=X5d,)3˘?MƲn)P,wg)=\m@xƏъi;w7arj"a m8q\)d'lf˰D8^;<iҟDDY$b:;}@_.(͑NUNGSr nTP4/빞}USɸS:VyGRc]&EH[v@O]Uπ0I = a<drߔ6IeQqGΦ@0G4%v^ (I]@0{>;zNXGvK.5st[X1N2l$rK[꤉Jk}>0ښ[D) h۫Gl҈L@Xr ypNnK0FUzy4SGΓMOGȤO L˼kNjD=i::QXH~K%$W-8|>eBsZ?]f}6xHXi)) X;w^}Q'W|w4T,;G`-TOHq_V챇ĽӶ`)$6M6JcwJR1@wz ê:S֓CcS)Oy͗Ѭx|qZ[6gUFEoA!jETyVvRC\`;L5ݩar~̀FX/Xmg,>CόKmV{rW:m$3e=:U!:chqO6TuEAmN̂ ,>y%//]~^| n$pbmlkR N.DB0͋W ڿִf9a1IaH뒷ݸ?Ok$CyU3{KPRಈ㜈~EVZVIZ,gT?3L8=ScMnyxv6WV֘9E*5thlY *J͆UMtJo!~ŤK1U8tB(7׫A|g92 sh11MER2w(zfh6~O0.ePtk R`k4ǂ0$UE)i n42MN;l:a H0*afE=zqDU0W]T``ߏcvM*?{cZy ugws<o uWVWV9LJgD'Ni|&ٗ@5~N|VB`k\*1,)wpF20:73wI(gӣ7'wh9nĭO}^`X:oGp.a@Hod@"<Ό'IV{.f(ө>FO=jb`dWe=& ҕb >n/!d{'Q<R;=|iA\H(O\(S'iߋٕW ́^ȇ ЧKI'݀{egeF{ (zݽΓӜ'b~i9 JԾ{a\{\QB%p[Ba$#`m|3ً6*2/G c:nקpieo6NM G:QZ] ?"74a7DnBi,u_ ZzD6~v(NuZs(Unu@Md(;r ՘ƫ$͐VWOu}\3J W2tX6ƅl˘֭*\;XpNFT.$jv1.kDxffNOr0<>?4`6cR/_` +d o7'uiQmwj^"SR\`{Kt^G䝺^io*` }|Ŷt?mi6^}e٣ ؒ{AtVGz*qaFx}8EKd x !NXETDl砺 "RG80o#4adTp32߉%O:iLY|oP~w[5g'*bV/ͥcG?TidV ޖ跷*PlWp-$>N-BY1@S7 VԞTi_3ݵ5R i q 4{@dТ_ԣA{}X@Z  sʎ=ը,0vjGǗ-`bvVd&kӃRsЪAhη<,p$l |Y:b.($aH.gd#wSAGӱ]\t\6<5݌*#'!WC|A:g4C;@_¯n) hZ!D㊇P'g9#uHvq-8b@ŭrQ=WZv\%6T8Y|噾‚}^ [;e+kл T}]\qXA5+~otJC5}&]Alu/a;.zBkF+mBz? A#@gb*nDG8=FS7}6 Wଞ {+ GXjR00  J S fw,<X4"}1_ #CɛN#|8422'/o=92.t/P~L{7t1ljirן?G[)q@[U]'eݪ7猟cΎCzq8;/<Kk~P]:FOO> D6j ݵ9ξ5-B6ՏD>>evGq3EcqCy?jϥ "OMyFң=e"r'' VBWR\R{Okz6pj9E`D5&^[Τc;#WJڅ:e*F𴥌Y㰢da5̱tnCGG,^x+<A||Ѩ* 7P)0qa=#z;$Ωf`)1?OXs۴!Nt7y" ue!+eB5Z޴3!nSw!Q,Q2\ %L/?X5yQDyn FO 9lqV8d҃q(6;cannpEY#ƟrI13 20<?Y9P3]Rո/8E0뢕6K|<ErUA=SozLgɏ: 'QӎEѪ([1OBwdBUlj\NU4C6{cU͚ݭ=r W d1õ,yj!A@cb֮9aa ؗѢ\t۱[nD;/NnZ}xo>fyʠ"0)v(FؓMxp}6;vQ4;`,nWx="@%:ΪNi)0 aݖh,*1uw0mOݷ',dKpxgvQ,"uvv胱GЕa7vjTS l6IT|΍z溿 &]^wz QNş:Ӝ m@4lm-lNsAv2G$|nLKB^$%Pr9 H_?ĵA@r?!HklZ1h-V%W_n򪿮00vGA}~lqy֮~YreWOK.d0FhaJlY|*+,>O} Iԛ-Kv,=LHaʩE\~yg]Xlt:#r;LëG+bv9M*l@ɺ0Cn$4 :2"Ibϵz O=#dj H"hs|rE)>Ȑ6ӫ s̽8iy+:<rI/ G},ѝDM 7,-_ő`;[,yƗ΂l!]"PAAXoVm"!ɹNL ~FvxLE#1+2r~FF}tCW82gZ;`lRQYKSW김=Rr/L?0<jiD㨚F/xUWdY7#t]qսö#Y{o<+|ui|Y s-EA[n1QtP3& i&ױNx|vJ&+!GPGc`BE "mjks ZK's#E!oɢINOGrj/N*4/B$~x=;4D_.*s@sO ȖfY~h /j o(<2Tj=F"+y50ce*?mڝ7}ӥ%RU;QWM^~C<vSilkx凋#ٷm`4G;J&yA`V6ȳ"`,WcnɈFVe,(  JZʠj=V[am͋ZSbx0|#oyA1.hI煉J'$;wg-X82;Q2(@6ɧPE{!ߞ]O$x_ӟY m N'"zYPjIOC8{JxVz{Pl)##(Qd+MzA\d/Pv4 .4¸:WrD9jQFB<\R4oݕ˘ޯ%WQrE6J{v\49 3q0$?>Krd]9U g_(7tlxq6~c? F[=.-fROtmM'x& E;]lo!عрbV6g鞅(T-=h? #29v79I_kmk,7߽3O+9 6Bw $?e_;E[1E]dMc&X+`X tl(uRg5Z}[&xXѿ 8hCB03B 1\fo^k>[ĕE:eG}묭пb^["mg$d8섁SnBwq4ΰ05oVĻ"cB|ZA[IȾ:H1ng{o8ƌpv|6At@Hj.(M ~:G, Lr]4DYcck,Gp*V~syv :'!5GA~KiyFAV K;;(e2BYъt0}h?y6tR4[pLrBr:2*9{zG>_eHGB&$4HJ;ֲD>@얙=_PmKq!着j$%kuB2 ("rj 0>sSNrj6ц"]1^|$]fO dAo'PΪTlk:~yi_FRЅ6Ti"5}~dXWX-WL,=ժ<5frs 9eJ\z_IOX-ƥ*6LU3}Ⓙ?B3\JvÃ^X4~ƻ6uќK$S2L~NeVŀ 8'a| aB⠮KD@<;@8Ga/W/,sT.>|.2?X/Q9 #mYa1O2 u8i%!7whYa[/dt9-q<ŭƖ@X^3E_'9[/1/]sL.B <zOϯIڎ^ )=ORKGŶ4˾h+mY!^Xp sd԰dUxwYkAt|:ܦ喠6y]e.}V?s/t"8_Xg]h!0h rҮq*HK.}vƽ17x*a#9l2Y:w'YG):~p=j0V<m 6M<,cuiJR&!o< VUqHŹ m߄>[KRZeI0Vn?-4%8}iN?խ¯zmۛO6q8) ӝajlv;V?hI-_sdb1”c Zt&^dB.YH`qVՅ5ήmGnG>Gd^3s0-Ndp٦3 :z)Q4'b./ZzKdߦ2n=Wdж,=$H@Yuk  j6(c{ZSކ])ݑ,_^sx tNؒ\~PbH2;kY{VU%+v~Boz=W1QJ3a-d^ˡPwf>a{]m4>" t1j$MN#Kq>Tvk ~/dMvDtkX F(шHлgۖrd5!?|U]r:/c^KQTJEX{>bay%89mTZ<oGj+?6c/A2Fx۴Daks+o%[M}! 
)9̦yTst%Pf9/sLHQwդ[qE .m&kŊ+5Kg5HLz:-ZdP2`Z^(P]G!v :HiRbYհ= ]HG7eA }?u|gBAYƷb]LmCEIW0\Z-<`Tk(jd {bVu;襵̛?h<ѡ uA\C2U{r'͆ Y0Ю̛x7O!P|uLւ=P0krxe ĸB<Dx'ԚFv{K~'G|0v4*|0ݔ"Jܷ<Pt5A\nf,1zdki9G"zuCqMUu"=y ~}U{[ޜ.IL%*z/6(9EcbYHYo̱kepn<QF9fMF?1on{~rWйfYmm˱[bnsP_'KyRzړ&ui>%_IX- V@XX=W}A2|2 <˜$z,tP*(5{ LɡZ괈&DTcwЦn\"Pr6 C0z@KDfVv(n%dߑ!X}hyg+IgN`a7WH>cMփ;m_覙[Ld(lrқ6}Gq3מJjڣKRWM}Yvz%w7AL\z78Қv fm-cڜz'+FQ*ȸ'AB`m0Ly캌 )P06gm·(ۜMVxt͹'γ`9<Ӣ͵T*2k)$HS"| RwQpaW@ z)ȱu2G#.Ϊ{$}{}xzbگ:c |T&O=$n AIu&_>HKNmg7ttd2."ґnJc̠(_pLJg2Zr/ \QAf^e@Ɠ2 pK`Nmi yL] ѡOY_r[h[9 cB^S|GWM,Ό{tI嫯/bjgvx吖W,7m"^ZހaQ^8=zA\I~,9` ~Դ_v5@0]_{"tQ}\Q? pV୭V&EXv<PR0T1L隣5z4+ܫS?X\$J IRE-vp![lWf|tM oI.\DU|g?Jt&tY.+7)7Eα1K<nɉ[jnnL[tLJ$ Y:ղw-3P/k#³x{>,3tsXqbws^MoUS@M2 r1^-(2rMk#)~T`xBv>a=As'4[j}Q_Gx5hHt5I ^1mYKȄ2- -іߞ>j~[iD]%+xl&M}9A;Z)5{@߼Y| j,^2,IiN'p_3c:G'!y#geq _2uzWw͎< laړT!;gi4y(HD%}-G9aa2}SB<+ArӦgAUuheNQf:tߧhS24l,v4SVڟ;9z<Rk ~62(jJ&F,b8M+ Z+ 4֠_ucC|ψUi^TJs\a,XQۋih Ȋ_bЊb oROD " dZ"< b{C37IbX 8 Tر~\PekR{]ܰIEbmL{w~G}aGZ̐sZX*G\r0qѪVBjkv*"7Ҫ= $[g=TDޟR2DlkD`LCe|6kxV%;c< S$3 yHa@LlQL^3~ȃXfتmG*Q7%T_UyOYw*#cGDSlܵfNUvj/Ja>}Z nߒRmLao?bD;Ք[fHGo?%aV>8k-~1 QrSPK7ͥOYzmZͧG¡7L*T0dS*wpNY45W t?+wҪT;بT/%zJڷ11--f*SܛդZ0x? -`jLY[ڠ'ORY:tlnɽ^$"RR@p;kGT؝.쮓gN!KZ/ZuF:\yĖ$O,`x4i0iۄ 41HK !Xth`,6Ҡ ~6نHBz<+Ec}/{v;ɣ5:oHw|$C DMIK3#QCb77S <\_%6rv';_tRX~WHY>$p3]{ >9i!T>6jK% 'U*tv̤{. VIG " 惄т~8vu#3{] nϴpk;Sd=VPO~>[lHmѥ0UK})1lg~'7K6M(љLrY!9MZqR1mctƊ"ӓzn [Eb8lNcC`@0y `"oUuGcg|ܫD+CLcѩ 0x&NQ.ua%B/ rt_ǵ=9MilZ/5A]ҵUMhxk[ Dgv%Hޘ4N-Ү)k̸[<|LZyJCRPPjCZ)FSDxoCcCwkNi E+I95$ah<4= ΋^#vD^_H_>^ƔI t{M%S+TC+N?3&!'ێ9#]}!ugBP`lT[C"$ؿI"D% 28 Ekھ'Ho$X/bf*S|u5ļ4+1-\RH+ Psɢ'q˧eY-*KQpQXx6|k"W#'Vb-{@L8`Eէ>Rv%VҰ55bb|O7tҟ*bMO#rƿMiKRVz9SSvS:` ` F tu#A]Mf!{gNx"%I {IpV\ =|P`e[@:$\,K=ս;t.{[n:e`O>)N'%k'ϢPyiʪ VK:Lٙ'T\1Ag#Z/}۵F<j^QNW8IY{K'># VԅG'W/3x}-(5s_@6|@ŎyH^3587-k&]]b+·eZ|@6/ozKL,_QS_߶}HG/Y^bnlq )5C9tp[TOU}~(}_*y1J b= LƝ ?ICyz1NiK^_ 1iDl "8R9L uYU+Pi\ߚ@Oj7Rxl]>gT>vlձRc40m߉Wüv9r**U~i."0ן#~\">P2rnC,$/jrïREOVZ Kbd䥳~{OxI"[,YkGiR&/k_~ )g3wBSDϰ/"T$IOK0^YQm̡Dz0B GNi0mjI! ᏼ?]E 3.w[$aʣ9AMya /a6M‚MH1\?e 1 {C{k 7%q3.}-үAD#k7oFILM|@Ol/5{K oog"Dwŏ e ɷuYh!% +Xv[T1"}b9U( td=Cڴ,%|)_MBgP9y~CHg_ʨ"2D zUbQ56T޳}g.k#8/H?sGv,aILuE"TpG7SD"b"5JcFb,J$E|~_z|tfy RrY:wչ 6ь K B#!V.<4Td Ug5e%\=ar Ivq_z]`#ϊy 1W kx_?-*ݫQ a_|sHⰭ)üI%0EG]]!AD-69<LhgO>in<2YV1G؊NO`|\CEMx7?ʥI%$ O{Vմ}]͐-W1M낛f}ܸdo[̰<AݠGţfn/u!mqfFT?(OJ 2ۓ35ޥoj:Aeq#l4|3((F'\xSkne`WXt7*kTCpvV∲\AzH<h>*i\@UY*ؚ}]_y?ynɔ7͚v2쫺nNO+%CT^?MCbrIP@/ gʹ:kDxVRPfP?#FjT9dgZ;UaCH%BuqATzk6m pּ͟$5ytĚVWeFU 9L>~<5(m'su7arCPY|3[IKEr0hNJ 寔558ffUX~vr4482{{8"B"N䔍00?AɎQiU/Eø edl/kzdm7:;n7Oz2T,VˁW.B P4<Gձk`qXܰϯ- t b(rI+%Ŝͷ`3{S*B&Qh2IFwUYD 91:%p7rgFQ01Kԡ,}WrL iE>wHXօ#o:8~+rm.'sq#֚Zy,6W IAFp["`d>HZP{&ߏef:dvPc"OK}:d/lhf WLl2!?dbG5ZT;_'rCy)\> Ry>^2;}O?27A!9Ibv6Mn'w5)!4ك9Vu(@-a<ݵ#!u? 
,0 2DkCZ=¼6P~<<oYdyZ!R>`I^DN0dHBR$x]H"([Pʭueei:Ղ(x&g Z@I<Bsr &M`CuPM`"3g+mG7%ﱘ&<,y|J[ع3Jg10C_"M]B w; ؽM1.wdaj#) (>,h| >lY`1(B$WmU_bsWUdmΨUЪ8ld(WCt+*8REv2ƪe c"Wt%ti7R7`4x٭ѳ*a\ͅ,cO@ ؝(e0W'4m$קK}ïfctji{ EZ TРw6ixڶt[ 6]]>R%Q1UFǀbՉFJGA-x :AA%b)hB(Z7kн+mw3a< h 1b88 gǘg;yր%N'ʾ Oczb.q-oUiqE>m>lN 03$ʱ(2eKN&Wm3}:qTcywRQtjрɃb k%!4Cys(V9(gW"~AHG 5]+FfzlKX`mO$vw(qY_bMej֏${el* PFfϠ,9#zk3,ChiIp1TUywC˙7uFV^Fqd+hj(<N;=Ep7NwcX='<p)֐[-dXD03?#Ǎ3vidi@ $x%Vʓ#@k~h&ߞAnpP̧YdqNclF#:xU"PX5S@NGl ~pL+Lnj0 j7zKMv<DUmM8 S:o1+"`Wc똒ݵ^<cBc*VR:y(D)2FW>=Z@H梨{!4=URRoLS66+ל>FWq>{ᬨC͕W\v1[֦9fj&~}f,&GgQ*QC!dF>BvͷR߂d9{i)a`81p@䴝vj|mjVw~`&@BAOXz*q*KΊs.(1F/|Vz(sLl&cŔLDG>P0kx!\SᏚxFsCb!^)n%X.oT?zy \Go(\ChN=@Ha-j~LC۳ه ̒Vnh(̾+u;ЪCO( Ik{;=`16^i_p@< aɱY=bbv3:`(AWpN99fU8vcs k?iep[Hqq2w%Бƴ(Wl[JF&@@*%qs ~/p~.Ux)cD+6O&,;t B/&jE+ōeTMWzFWL, in6>1^~IORo7U Us0Jsdv dFi΂Egœ>DZ[F˜copR^:y"'9ȴ<gRu]5\WN4'j"uBR  bPs-_Mɖ᳛拲NA^TT}GI+_EdFh0q)YfVIY?E?wTa(GK̬XB!{t9ˍ4d8MoCqA5޿{a/, ĽbE؝쾣7pm p{??Zk+˺`XJlUQ5q̼p5J{ئHy9=~F]q<kCBqA %]VFP{ p VN#GFwCKJ& +1m0EJE0Hh*Bk(#ZL>}%l4I A(?޽ J(QRϙ6rΕ7nMͫ')0Sfcd%प|n{)eWKyƇ.Ħ6WDjOt+em%צ|U[15f{gjf`o!˳;f*)[B&騌м]KsE1C'l|.sMnS=MC&1}z3ch:XmzU 7ၙɦSU[qܑKVKC]*X{PZ(D_Yѻ|*wn­ D 8JN4[W *ZB*~q۞h?K N E QU|TE %M;xd:x.Q9EF. f4)NjlM;+6eBR8S7bK^?|),CEO'e7/md,B.Wy/i'>~6@4OrbFUuql$(4<>.Vbv3Үn (ĂM^v@ ێ+i ȳa.Ygan{S[ofsR׃^trX]tY4,+*)/ 1I(F83@ja-!cab(1Bs@8gЋÍB7pnrdwo|yp0=Xtu,,"kgp^a$W1$lw߷X S4* ^2By_H}İLYU Eǃb尓~-zSNȅ 'iT`*: M!wځ5g[3AW ,p{H1rg 1-Ы̆N5 5#΀]6K/#Hmπ%4|kllmPNoQC},R"]UďCm|ҽw[H~ ,b|IbGK?J6NC4KwXhbAC@.0(HVieM-wLŤ&Y; EJw}}'xB8.63{^Ρ,e{Op&^/;bRnpTqXtw.?&Y!=`e͝4r$-NOqG Mt1(űՙg7U6ׯlػA3/a})iHl 9xl ^9WrvheaxeL[Lͱg̓c3P}+&"C,Ğ ggR; Xu/fb3>|S 7xH&+ 9$ڿEςZYVsla:=$N.tLC}m_gPn;qhMYHj2)%.loJUtC!s|!yb=$t( q|VWJ=_nden"j!?b9|J[} !R+?[=^xW-RbЋq;Ԁ/D,B d~g7'-7*n l|7DQYЩ~3ԕuEx1*@_blڦAR*54ؚ?^Rm4g+?X·(ݣϱq0|*X;1U K FV&rra=*ګdaq6c^c yń-6*{'&.c*WGi9%>+f,!uZMk#Aw6s[<{9f',:&հX};S&> oU-" tEMDWjO϶L2{_ L`v JW[2;EC7u+!hnC)=ש̓(!FUN':y2C ›b \.[taLHUP Nq"W HdE LnB;߫Q/u0Td!9?:A )S2@=3d Dн?IF23Xa -(Rrc?'!m~ohgY6_`eNӂK OƛIo%}aY?P V2lCHtL|tq,Mi@ؐ-y%I{t&gB(HvU9߬$(Vjɚ<]>JR9ogc>1:Lɼ+reP6 bY ~'T;<X!ŵq@DKPwr+G蕔=g\q(ZR[% o~2۠IOy)"D}*sS؋~E01uTڟ"lQz&ۻf`KU#_l@gir+ a"'x$fZH#s><]]g;!U~}#>1 tÐ˰;βS9K$X˄의q.&Q'u쳕[P(e}ӾtX bp0I(oCG>cqUhc8IiD^UZBmN#󊢮Xo1=Fe%TǴ[=Iό{=q8:Rat5bRڑ aR" ❥/:.$:Ly\@}a9jwM}|#sV z',`+)1I B/ mgu)brYqdҢw2e}2`B=HJoN6 5%N  cEaKlQw#c !{+%ID,@~-;)@S-{s19 Bއo*2[|OciMCBDvyCpD#Ɵ[e 1H+K-L 8X~u`CCe]եLM݁.н NgHeƑ,#loZO# x;&JϜ[713ճĽ5eUu=1`nv)UpB!9__ߥz`ԷE{.Nҙf/ݢP!/ &s86tv*U>qHݼC< M$cgRKiI_սh@ӃB g.f\/mc00wL~R$^Lks^5X?V:ƃrD'1{hR*_B~8 q(l]ֵ75Oc]֛R h m\@ Ѫrdzrlsd*// Ky4 S}d>z#B<!H^:@Z<3TIc9&a%\ i${ԘCRV z(fG;L`89oz!ڸrz-fg\ |y9ajq%=Wlw$7 ǃPtq.qn5EK,:+QwF; {ժYݜLQ>*pw)jog3I{4s@Y}w7Jmƺ>K`~Yu5MvdC<}˸l>MˬR*RZbC `TƑywh8 ت*2@Eܬ tZMwMޥ>ۇKMk2N֟&qx_qOע.Ucֲ%[K>n^)櫈.\9ַBE *=EKWRC5u}[ K}8BaωTYgʈYX0乯 ֗ۊڡ,1h6Rh}!#I;볕vX/U ^#I r :Xg a~;m1gkI咯t7*KCYMYI ,*2jG(!>"|űs,4w`wZ׽L6 "}g7V#}ċ^r>"3=,,`k2:)n L:\!\,`<SkD/5ХP=x]rçt;ù?Wn4#DJ ݓq@SOߦ P bN>e:d=^E]Iw3GkTx{6TȽY {< ok+x2!RL2QY"qV/5 &>ljX< ~SQ$K< 0RxmTzE}BDj%=-H_6w3Zikb]{݊Nh jm˛ Oq̐\u ]ҵ0LI8lyy΄R.pVu8$zwl#XlV?:$N8HQ|5 }x=MXU,ox Rp0t޸A1ar ة-Z*5=r@=ޞѣu[)Ū8.QO]_಻r?;=2?#O>VkPXDuJÝSmK':ۥ)FU!cq1gBkv/m{eKAxwuhGFnGw(L,c/,,{: ٚBu_,kpoe 7QVh(k %jI/zeԸg)!U(,#9xE󓀠Y%f g$*C#!^H4 >;$gvJ~${*Xa|uaߞQ}˜75Ρ<Ԫ"~NDHՓ0: `A[ј&4BS1j[.hcc^n:y#0$ &5ya`6dUAS{3wݢ}ײ0PFa}]M%JJM@Ise8?ڛѰUE&{!q"0;Ϊ !AͅIL-G8a2c}k?+p6nok]n3 '$Ż+r`wv阍Rw$fXhG²[[wWSr P(W;F\hnt' p6eX?6pނw72F%>@>-1-uojF#u=Bsga&N>3jyTY=лx=NLxIce& H|u2&H~D 8+PW [/4 l" $gɋO);<&9W pZwEnzJrPAsu+Q >=^*CuW/rKDc>_ \¢y\KKzH8ުbl. 
{$H֦lH[ <Oeg`mGL˸XC :Pv$ȿU匩><V+>.TL9!Ήy<7Ĉצ?[ @VOclޒb!C!ic,zA[*(:Tg$1r<oSu@Npd`&8ltXfP- QJȥ Fbl:Aϻπ1gUkDnJy>g}l}c¦ٕ3%1 G~+ƻ+OOZq(')\c:7LM-%=70Zp1d;R-*OBӡIŏ+n:pWJBAUG`ŃF4Nr  [@B[s@[IJoZ{`ZbxiqANQ8΀rspmY?z)YzZ+[\ [}Ja=IG[=ZmG;ppXVcA6=:*L%oE?u[I/[tHۇp3JbZ[BOZdGEhGD&DDY6f̭oqpL"[ qKY`CDQ C4ZnfmZaoX$DhoOoZpUro4GD<rpsrq()\?Yq[q9Zx<IHdZ_9MOrZD|pJL#+r5E`AٷL,LZA: "LqY8IŭGvq3[ΖDbJ9M:rCqz~[WYQ:XTYr}J1Z+q&IښZz;I:K!AOjq9mYP[6F=G>*6\psFYpKGZ[9pO[HGZ-Za[ D?Ks RpeR6qZpp|IZAJWKEoI5JGry{?qq=>y<[&Y#\CSGdO9dqH3J@uYp7o9J9srZNHZ0-YkqqowF֡A^cc}6rjrZG@?Ooq/I"qr"onoTxo99@-k-LmGyGC[*!^GKbI8Jr/DtМXu6BZLbqm<LGZ\^L#2E[t[d$qUYukJD*GPKK1)YNqF\c@oXPZpYXr6z9 :ry[oWJpLrpH 6ˑ>)Y[DdD Y>[rpZ<XlVDg=rzFD*pNX9 c[FqJ[5p*ZZ]Yr$Ho:V [y;Gq8&[;9J>[u4[qI[WZq[3qDgxorϋ6IsWZ)J,)XB8 ZSGo7LC7pjEgwf1Ew ?Prq_\*E5U6 r vJ`YsIIJ1yL pEl8q EJp\Cr)i@H [G#EwGiq?HeG(Y|XxZIrJ[q!9!6;Z'r*:[a?U@D[`Y7Lir!p}LpcSXVL>[*qEcIED Y'BrZGYpCGCX{G=qEZ{DɭKE[Y;n[8-J[Z@poY(D[f[rDp:dC1gLBIs ~EZbJŻfpG p[Kbpx[Y@+:Xs,7bZ0F[YD8'<W[Ar~? ppn]Hp\0c[?x[6r88I uZgr J9mH$ZJr[W[!62@A[~%[JJPG0pWeZA%HWFQ$Y*H=HHGs RL \)/I2qM[Ȩ@qoB%rEHGD(rzsxvKKDWYr_FD_FKO[HB(r7YF6r+YEd@Y8خ[+qLsrN=[oK p@ZcYGpYUs*pnXK6>C!q{H<JN\ T[L8t[J99)Lm[Ap7x[>aqLs[9D^&G2G]XՎ\IR@DahJ@D)KNHKFѡqdr[uI`KoKpaLELdbFpJksC_CIL<SqLI2:V$Dr*qo؅ph<HZY&p[ZYXL"GqLmJZ[NVK9\DODlJ;N9p*KfHXZ6k90I E^GF5oK*Hȗ[L[Ԏ?"L7CL0 @[?-LBLq[oLEXbK[(X[ JA$q*6\L9VL{xK2J=6gAAK[kYxlDGl[M\5[z[(sES[SL/Y[mLLJv[LZLCH;Oq=JZ@p rL@LcCHZGIr8pQ[so9oJrrIZgY[K8ID>yqi[ L:GYq,\3I[@SFpY#o%@V`>}plJ ;o,psOdf:[-H\dJpsFP<K9rw[,=7/Y#_Zvd\;yIYE66BmqF-arFoY[Rpb[}IrYVE4YqkIkrtVEoD[FY4s9pI[[AKȁ<CJ~rq%D$3ZU9}FqY[rōKz]6WGppRNr^`[Ij7EZLp5|=A:gG=\.{6xrUq#7r5z@[L`ZjIpbI:D?ؽJZpDP8"R7x7nsp[7LtF`.Is[s(Vs/Jo0pyrKGe+4[[pM8L,IL/96̜IKU [K qlq9LJG YYvqc [7*pp,qHp ID%DĸpH@Y]qH@DY3ZYrФ8jYY_pKZAZfCZ,[ lDrL8Hrө[Y/kY%'JvI-?lXѦqy r5I;x[A9KGIPCb riZj^L1-HG@DYL8s>)Z_GIM:iqzZCZRp&\!GDZ3YA`1q,pJo Z);Ntrsr6y[NJKYKpp7_[XYiZEyGPL.Xʯ:f?վ[  )[F<H/rٓJEq<[GrsbJ[D )9o*@9DL;Wp^:oٶ\Kn9ْ@mv[pHHJrI1FoZUctrÂL:p[[Y:OIZq<FA-xrIF\o'pZt[ǂ[}J+YfG`idtpJ#JqrT|=n.Z7Z KPp*+I%CwW!jZF*bZ/K\PKݯLr\#6YX8 ]Y[UYrX[7_Ic[ŚCDsMYCQP6C[HJ>HZ*YIf[`GrNr:r;?[2[kEpxr'`[ޑZ.\,[t@jV:F5A9,9&p(Y[fD` Zx J \0HoC2GZhHcdJ\+W@]Zˤ[rX[q=p@7JY,D[ҫ7GYkxp͌kFGIZKo[קHGoZKlpAkrHZVq78[DRFY=K6VZ7Gyt8$Y =GonXrG!YEZ_Z[G^r<cL#u1pA>ҋGJ&4q>[9L$:p6L Kp:MYWp@m?l_Jӛ[{q87r[qyZܦL*Ak,pG{XeLOrpn[#<q[jrHDZLAKoZcUHEY/daZD0r6$PF[ZKN["G[L=sY\0Y[@=="o=<pI[DHrHYdZ!fr%L6JqDDݩrusnK[HHoFJNGD+@eLJ[0;lTqj}q=8Z@^@AVpb:DqoYH07Zg@[Dpq:;Lmr rхF~ES3py:1KQ pS8 9RYEpnFProeYH6-GZ9I״[J[_,qZ,8n,E[ph[X0:ZL<['+XI[ǮK+YDI7yJ*rQKCX;6=nKZpsJ3Zfq̩q'CYz[|po[ k[,zs[[?YyX# H:Ji[r([Y[̬Jtqs8˸AZrDIYr,YGaHړDj7ryrAbYlqq,JIIiIdQLA8rFnrDGq[Hoq1GbYu?dzJr}D56[rbqF[@j[_=dm"J/|:9YD6IeZGqάH&}FD[Yp[p` pI=8FE[[-GD[46$X3Yc'rHzq9Fb CuAK^7dCjquX\Yܲq"[è5 !rLk [`3G})[jo(Cudpnrd2DmZJZ@N/Y")[J<JJr ZAL\IAYK2r$I߲[Lh.Jg@7oH$[G`LH5q)Dx[@HZYiZvJpZTtZBoXKodLHr[@:<H6HoYp]IJ?[[LsrFrZ2pqeqKrKs[p d~DG888z}r\Z2ALkZfJD+[YW8rNX?YZ[ ZaGvH?LjqNJ V\YrmFoy[p}:prFk[Xͨ[+;k[JYNYC\CKc;NY4p@>&ZKI[IZ [Jy6>rZ#.Lo>CHI,EtCI;oUrLHpn[*EozDfJAPY#r%`GZrL+3:|rYPF;EkF1@eYoڼJ$gK|AZKn9@K"GnIqDI̋4swJ` LMosE[;#d;[6b$A:٘=zoشGL/Hr9'pL#6Y'xp~rqhYHL*G[ 9dm[ YnAj;j,YZ-G8 Yba>ZT[DpLdLjL pYIIHrrs_o>LHFoEPYxG Y6sMoqqzp5=}D*oY?KQYL[+9=[qC\JrrLpZLpK@YKRc@G IrGGeKr+GY,7LF4(;6[/>-?dXLAYJH[{rO5JСq[tNqp6 [KopWop}'HXpZ:XY9*L.AVCI@Xjr[L7AjGH:xLnTYGO6=pJi<Y}JlI[`qEp$uZs 76Y?D>oHػpZ2yH֭5q YOHq[[,dD9u.Xh5[)rXqbf3rqzQ(\9*"Y*LIJ_ CIqL?SZ7X}r0duL[k<IּLJ!diAH8fG9RX. 
qHZglD HZ#Z>JwCf[t>YI׀Ltqyr:(&Lxc3K٪kqWdu[8[@+Kr8pVI7[Wq>KfYeF0oLCrJ:NKRqs[p>DqXD?DqKRZKo?qpG9.ZD@ryrsZ@~o[HZ<DxppPpqar@)FxJ<WFFkp&8,r.zH\I$I?KS p{rqr9v#p[r3LD FSr:{`LEK{8KG؞pxY4G/(ZF<Z>pZrpmq?p[`oB9m}ohSq-rJqZrpZ7KLoYT[47vNKx+XSD*9_JrFosp6Za;Jg[A /q NLgLi:xlGE(YHKo0o[_Z[[kGFos>8[lH[@=dGZo.SZ߂A[p.8Bpjb[}p[}KqL99~L%opq qKPNp+D[HttqRLIE`Cr{Y{JoւLIӴYsZY[t5A^GprHYGh[}JkG HsH \p8*SD?_9F:JZ>X[ QrI6[q)IH6؆qI[Jc"rpA@DHaJXL/[J6K0q^KkL,\oZpQZIpo(FQpHoop@ЂDNY6́A =AF0CeYpq(F҈[FoNr+:k}L$IZwYp[C=LEjZ<CL;GY'[JNFlG|8]/Z>SLJprH{oZ.DL6IIOZGz;EEk[*LY0HZAm<sg Yz1R9YOd0J4Yl,DGi69 Hp FrZT[LK;ESoYϙquLMJ5F=[@LHsErI6AZ&GsK=pq:F"4Y{ZcD7H L#aLp;pKMJ/q[A[DOp[Yy[qGq|L<ppUGZx[ *9XsZ?AXhd&[JR[[ 8{HxDzXCHr[p[Y1YrB[8 Gr[B<rOq9tJM_J?,pp_[[>"ZJ[up?ou9 GqgqpdHYJCKnJeG3D spsrb<V\[AG Hp#39 Ώ;l=2=p[Gxr?HĒLqP\ralJ)[<rmLopXDqߒd[&Z(-p@ޥZj;n6rk8Vp%zp: rVGlXr"oroV8D C7up*FKJBiqHH~pCODrE@7LZr %L:DY8I[!q "LDxYUH1JnZI8C&p\1ˎ=8I @Eo9LPD*LZv G{xqJZ݇DL)p7/ Y[dq'LpfG<YsTIReJ<935H GJb#@H_\E rmoG@rԪL ZFhYtUpyLKpdLqx[=GX<0#E@E|Kp JpGLO<lG:HKRnZxp q:b[џ73<p[HM[PF qUZ{rL5CD8rChrq 6[gr_Z[EZ-[ЏdBpFrKor8+[LvH8oŏ[rqY6GqϷJyKRuY?_GAq^q(S[oѬErEvI?h[F=[[oq1K`[Fg\J?5-6L,PYAIHZ-OL@N4Toļ8H[}1IYF:p FcQ6*YDG5Lp[([8qZ08tK;rJd^rZrT[zZ\[Ep8[Hf[s` IeAY][KL7vZLHZmpAFLPpBN@:GA'[E[=K5WpaKOL?%L5JFZ)oth=;I5pt?s0nq;o-6LmD Do>tGrYCAyGGZ@@IQ3qF@F,9~LKIq;5XCqJIEUdHY%6[>M[X4Dh(TLH9([["dLGvJԺFvYED:Md<D'[jo.J6? [Wdr4DVq%#>DH1GZp~hrΣ[YBY\GG.7c3GH|[Z9fp:G[ՠpJd:7opLNLfo:L@r;F[Fg:FYqZ=90qr[PA%[h7mCHtDJLDGM56yGZ<<KgA5ZX:UYQ8"Yqa I3Y"D[Y6rrOJ[49![ :|pq/u 86EfI)LlC@r9:i=[6Cq+rjI^H#[кK\=ZL) L<+EoD[p4K/KZ+rvp=uGHp1ZqF*Fhpm\#JRAGd6[BF^o$[Z [{G [ɰ9TYzpsKM8H{[+q XϠp[YKJ_LI>IyzTK5Zd8A '8Lnp [eL9>fLj8J$qZRpLbJEG7qJ#|q0LMKZD[IcL=m\M{JvYIZ8aZo9Y?91CAF%JITYjv{%p[Jf[7TLNXLqi[KSqZwr][Y{k7:YkI?`dNidX9\0G[FqK4F_rILld`Le09pr<rpLxpzZѕorcE+[9JQ[eSEK)<F9GʝJPEsD:Y|ir/L!LQ_[ٟZxwYKYJYI I;VJUA;(dYZA,rrv20Y7LfHFY?F46zCw[#AC=Ы[[g,[?'K+rxvE5bDsqX̓q- CIorY[-m[;#(rrrՅZq:Gs}:ZZb@-p !Xj[erDoCq^9 pp[oFp"us LiDIL)SF"=9Xr q&pD@W?6=Z8XLF[_3?X+FNr [h,LIX3oXKZjD(YrL5wZ1ZQr[ q?vpHGqK(ZFnqrZY[V8-RXHkFtdp9yo=q8Ju'\'GHFf6oWLFdq\^sTGYA[}CS7ZDqw[CCLkp/[pps8KA Fzr;Jpo^K9o6q,s,da][pLApK:pj0A IJq:[YGKEYp+6ˆ\!9K9KKJoL@Il>so׉C>[XDjs>/YLEdo[6CKS K,rDjJ{HLOJ^GӋFlqYEO8op#QY4FhC;l"GqqY8֢r_p}AKFllL7͚[Y[sG?[l&Cw"ZqD>)q$-D '#o LrpFZAHo@K KG|YrMq+\L*eY@6rI[~s!6q+Y\YKZ8h[pL+HY7.ZElYi8;9Jf>DϏG&X.L:_~[{AQQ{d\?F)rs[b2YG[dE[[.q]Fr&LDpDLkXpd!\!KNJJ(p!rrrr[GL,(rU[a G˻GL[L[ K0HCr=)F(;Q9"\G7\7Cr\@AK8ocpq[GZ"r'E#qj@H`-HJMsIZ EqYzXp3)7qiYZoY|BDuY2\8u8=KwZ?4-q4!@LYZZ9p5&IۚppKNjJYtzrYcoLt[~;5\D(;Z͠qhs [Nhrɘr+FYqZ$)[%J0LE0q,oqL&Lr?C7LH~r/HJ}6sIr@r[ӭ[C:qj6_Y&GIiEp":7@B:XqX^CEY:cq3LGpd%JEsp`[ąZA9[[{qlZa`LgH[.q["Xo/YoroJZqQ@޽G7GZ[EҽZeKLZr/Z\Y|[rD'?E8:ZiPICsXD: r[BqzLg_Ywq @ :57o]YGZv[ G-rZprS}%[slE5[zqo7"GL2Yg7zgGqI^FrVr4E :jaLKMIFXp ['r]h<9dZfqZxrC6I"=K~Y7` [Z)>xXSr5F@Z2?Y~*Rtpp IHJJ<DZ:fKTFEpBL$r~9%r&QJDI5CcGLrv[JKG.K3zsZ+[|M[r[&oW[L/A[FIXGKIj[e7&DGAhA=ߐY`6[g=Yv:h:r~G{?I^[MD4[b[ZdZ,6Z GDHK<D=hqJ=b>H[-[tpa>a|9cZDpj8ؒBYz@HINplCrZ7+rUZbN:rYZqLpB\3Fo˵LsC5C,oLlII%rLD ?oݤKG[|?y9<Xn$DD5AOSLn']sZ*LF?G}tI3qK&rEJ9;EH9[r;YZT[Mq1YrFodD$DDZ<A 9b;pH7wY,Y[GdZA&Y[9bZg[3G\TJqZ[ep!,:AL+o*q._JDKo3JsdZEVYr_EԱ[H$88:qF7rrL:[PHV$[P8?pG:[8nCpFoXp[pSFlq=qZDZ"D=[)@]vY4ZKFLs@GsIo o0[/rf [KHF,ZHpTrOYfGhSLJLq [@-9=r~ D.Y d9 8f[;p(\oLYWqv-K%dFtEI[)Y]t$GcL4 X/KYKSMG`B?ZorALN\>DJKK4sEBrJEfhrmES Bq>qEdsqW[!GopjVrF?5qwoöm>Fc~=bIp"Ff>:ar=9/2;JK62[vIVLLhKז9lpH"8ϥdT)r:}Y$L~vLIqE[ЁD.DyZ#ZpU'Ij9<[G[_o8O>KY=Z}DAY6Z[p [G[or&rY%[x7J/GA1Y,r/QI6HIyp 9ZUL)^Z@?qqpKL1D۰B qHUGXI: 6FhxDa,[)ZFL@ DO[/QpEY5A݃@8rz}X[NZ91[[!a[@raGrZUD8_9d;o3G6[_[(L.<h0~pIAEkJ:L 
rYDp3,Y@(rY*A[Y?{J+TqmFoYEqE~o[Y)DJm*6IJq>[K@^>7A J:hZY"G.q7Ftpr Jf^qGqYKU8uxLzZŜ@|I oHS\*[8I807[Ge[ҀZD"r[dJxFlIuqCE[Z8p`py[[2XpJ,YpnSsDKr|MZYZq=b ZD)XE6Y>D'K:GE܄[ qp+D@t[L5:Z~\8cY5cE nH F[C:D[h|HK?KG[*qUoFq`LGDIV @K;b?e[JKI\F&pE[s˸EHs1Bt\kY?p(Kh[9-[[N>XJpYrbZmL }q3FfD1J~YC[6[J2[XEa!;yZ-4r1L6=p`?CBY>[}D:53Zv9H;J6M?PQ[~ FMc[U\6'[mL pp&pLpZ[6FsOq:I (pgrbpZFpq`5ҪD@YYuC9X}s.;>[N=)?tY CE[<xdk[6p8rzWD-vF-pGZYIE@@j"=RDqql,:@KT[hL977DPrZYg[L?Y:U7wGg[[G@e&ZdX>oi>*cK[ZπHb9!qDpxFGL.#FrDs/rϷpMԈ:F\G.qrȣLp6I[YPG8qLLlrM&9K{Iz\F:6 C:'ErP[%[ʦY 6KZEFL5M[#H{@ydzDLn|pYIp 8ZgqZ*sY^irY܆qgoR [Gc\CrMLg[|YUJAs [Frq7KqkHJopLhI;Qys(JpGI!83d^nq&4D\@GpY YvAG|c["qszZ(QGLD7?XSo۫dq,[>CHyZYz\7JrL6D*pqL<rJYqt6rq_{J:%p J5YD+ebZGȦ[WFhqAD[KtGrpGoT[q H^{ 7[MZ~aE2Z8ZZcZ)DfC,::K+JhwhpGwgorAiqaIZsdxJZ|\I±<ARqBKoqXBq&_oEoF 7X@$sJoqqhp)=qpkYf=YD#YZ[YDA]tD!p}KJ:ZJZg6Ʉ[$r[pYapqY;Y3qQ F,pGPH[X{EXDL?Y> pQ[͊[e[oZLk-EKK>q>pG2YIٻ[[Y^DǙ:qkYGK3[F[[Za9I6/7DpfZL=?[OpJW<Gd*=B:K@Ip[Ӏ[D=R@8T$pa:Gr@}frX9@[2[}p97X[߸kFpG&CRRXU6=pYI[#[8Yp4orJLh[YDIr[wL0=UqHKZLCoKqir=p'[;=p[\[HJD[*8IZCGy[K'p[YANJyIY[>L!KCq<;J@pq&DL1n>*ZGo~HL>Z?[{rKWZ>DnI  YCJYHMrjFGZ٨I2r=[LnqwrSG3{5HAp>(K9rO\rqqY{y8b;p^p9"6ZI>PGJaYGNr!}NZM[L9C7^rխJ_J/I,*HY>_rlJJ6$KI:ZC.[DhY{;Zg7)vFCu#[Z֥JMZrFAGzr@rj'YLdc@&rv,[=oոq[Trn[X4I4Yg;J?[8\-rvDdLcFKaq<p>rK C`G͜rp9[5r*[8[`psw7qP[p<PG[[/6rso^Y{YD>+p6$@M=L9/D >F٠q=[c;a@IIzp_o<L>yYrtY^\&[\BJzAIs@pZX&KdBdrIk,F/ZL8ZjH/FJLF!r[Zr8mr7G{Zc'9^X[-@|ELJr)rѲq@cYZ9L?@6LLAD [aCvZeN[ckK;[7j7[+%[D[pZ7Z"h>S70_YqkCy[ZLk~L?sG:djL8UY[ݮYcX[i =bICwHLkr| q-JoAH{Y69Ӂ>=:or/@ݺrhTYhEq-Z:[ /&EٯL;IrBKKPLu\GĒqYJKYCZK/DK2H[q6$["rwqg>Z5D,HQ[7E[.96CacCKSIJ>8ӆ>1dgGryOqqKVYyX <RY5[OpHV8LLAq6lL [t[R=I76^J[pfKTqjrrKf|ZbCr"|IY<KFK\?v!Y ?$ZsLq"DmqnZLi@]L0rY qLH`GhrH=ptX_!MXr`p[|oEZE9to,YE:xrW[PY ZJqyquZq@[BK6o[r!,ZZt\Y8[[rL08[J"DGF[Ƥ@NrH$D\YtXl[P 8:TJ>I9XX6X[p^8prZ`z GJ.Zmy7LoXLAbZZwLPMY[RE6HymJRrSOۃAjQD\*Y[pbhX[_pM[|rnDQ[zI3q[qid@y;IopG[I?G/Vdq8Ydrr5[rJD:5q+6r,Lgprq9[?skZ8zyRpKIWHZdA@[`^qroY>=ir[8GhLNSDA7:@rUroLGHJJgH`rKpG6 sTq4KZݵ=7\F[@nIDp;HqJ8HIZ8MsDýFk3XJ=YI G։YZ.lH@Zn979ZrZpt:q[q[X )qNYkdVo׶qpIr`pd8['[pZ`YtZe"[urwZI@\FgN@_YYGG6Z+9AJru %pZDFXrMD#D>6=MY9I.pqoZc7mr2Z*[s[\J KHE@ޏL>>lp@OY<GDqJ+G[rsZC;Iq$y[}:{vrI[@6 J2[J [Mr#[D:YEFJZCYF'Ga[Q=w?5[b_gWI59 o[L*?ut7o6GPGXY<GE;/GX[kJD8KiJ(GZ[OY[tYI>[_j9JX.[FqsqrK<dn?\F{[GZYD)6i["Z2[SD?9:rdq[oZws9d;5L(Zv6 moL Jُ>ZZ@J0r1q( Dr:o,rVop'rO[3Z[Lr r[}9.[[ irpJp]?[qqI[ˡ[yZKY[7 gK[&YUo}piA@mP[%ZFO[G$[6H(HFcAh+F5WI8Z8A q[^<92CYZtqlRKYUD]99[6cq&Ga\vpq\IJqD[Gr6fF6;'A[)ojr|Z':I9[dCXZtILM6Doup7/JrEqJJ\^AP;`v;R,7Fg\;mq_GJ1+qkYHmZq]VY LoE7@\q6q!cIKeqrI+6qqjJVSZFd64ZIOs9;Z)cAkBZ]jq%8u;QTIǣEXp[ >M[/&L1ZL&[KpY5Yf]p7H?q$Q[H&9q?[1Gzq6SKQ[t[q~LroUDxIZZr4hY%LbXIF6`H&cq37'@}+[YJpJ@Ls>m@ D=Oq!yJrar=(DKEgEHpρ[*s]LMaC,G6[%]dOrGD2<= sI=I r-rע]NEGMr6ZpoZYpYJE.q/L8KDYLpfEIƞZǾHwpE9[#o+Xq:GLCbHoIEH L:CK?r]mPH"[J3dWfZB>YF'[kqqI':O:xr*pCGZ7PALOA~9x4Fri}JY28YYFRZbZ=GLrkkCJsWy>Y5[X{[r3[9pZ1lL&yppG6Oepi:3[}>:@6J/E҆FcLoqBG @5ZurMKiZ"DHY6XlD \m@b8Xζr6=q{ZG߶1ZAqTX@=G}G5q?7C:pp?5q^*9mZ"[DY|q$[+:sorH/YrZ+7b[I>A\RZkhIJLlppNM :$pdJorNZo\?CZ[ڨLfN<K8<)\{D-KGL;ps[`[IJ^Gq+30W[ L?LD2[_d=(pGY/cwpGKCKGqDYpIp[͸FAp[FX.H>Ibqrwp@eA<~[ cvYW pqrtLA5FPK?#ĊKApu8}Y3q&6^6'DL\$KNdENZ(TY6D@Y0 o̝G D'[~>>5Y\l Lhp)[Y}Gs[dFlYCLbmqDs dl[f;Y1Hp{p#qDPqZxNqUG[ q[:[YvoZ8CrYئHQ>+8o[WYu8BZHdN;l\YL7L:{G{=[0Z@Ys5qiE9YyvC+IqLK%r 2De>-IYYDbr<W@:Ou[,FLCq`?Gag>`ZwKQYwBYZh~Z>q%z:[YqrW7xI/eq1Caj:EGլqDLj8<pHq'K[?DqF|LDI92,ZDFGOIGY"Ldp:FKK[so[D o|IL.pri[G\NKDXp%D[J}Y>Fq'I$KW3[pHzLnZv6H DJLEZJ[!!Iyq<ov9D@H[]G?HHdJ3dH[Bq*9Ѻ;`\K,Y7yD.[CfZ KU8)p?Yq[[Lo]9["[q[uF"pJ[dU8e^[t[^ZNp92~rBAJpSY GG3DL9r 
YqZGDI[EZ&[}x8[[ZwL*I4}[6޽[p[@?6G=:_vCcxYJ[[[ psJρNqDqgqp)/q [[?<jpE:l@q`[Y2Y}rL qqD=7>}9\:sIr ZqV>UJDKWI*[-B6:YD90F۽pvHp[[Z@uqKF5ZA[|GbN{[an\H9KLo@[rЁrZrH9\:x9J7i79LFqE[r~9)VJFH=>5q {q:gAgI2IVHqb?hpw? spUJp7`GHd|qz"pqppE7q[GqpFZZuL7>qԾppKG[\r0>@Zb|p~XH[ZhJ1[D JzLFaYlB$EHqE7drtYorp"po=i[Z[+P=F5@[a91[[8oՌq<)XqrYr>)L}Z\7EnTK7wG+[{ZZX8F,qzqӰZdrr4CRgAW8[|HrfG>7bn;FXrI6FoMp oHGMhq~wD rYuDPYYr=dqSFx[c<xG{ ]srF^]>.C}Ht[{Y oڈIG.)!K[ۗqa[5du[R[`@G qgD`LY+Jt[YJe'9[6ZJ[%I:M[8pHSqے[&LP[o1KZ|FHYW|[ q2YhY9/H[W~6DZ[8EHdmxLIr>L r?[F[|3YLNpYjP@L3[3V93Ipr|7YKJ7Y4Y18 [eG{qILh[YמHrr[pAC:{$.U6dJsG6rlr@L[3@?CIջqHd[F?L7i6pY!8q0qY5YHYXZ$\-A JhGSYr,[[AFhL6c3rZDXHEQ[6[8[?:WY!g[3rLhZppJY$*q:xJyFmJ\$?D+[77a7[pQD GCrL)5JI7LqF4Z[[L=FH~9HG vL%Y9tpi.[nGrUnDXǗIp=JVJ[phRp_p:V:r;LN+J pLKq"TG 6=Ae^=XhZH3[Zuo>;q 6=!YJIqZCZ[mL*=[Tr _;ksrk@CYY9<LP%[#>FEJ[*pCFHƬKqpL@IPY|8Z99qqLYE\[[݀[! q2\NpZuD+[C\/=p_[ rY[ϪZ KTsKnK8`L# {TpGc[=L?I$lZ޶?ZvfaK;[(q{Z1[aXI>sp"77r[,:@[.Z KKZ1pXC[_%pp_d[C ;nBrC[/Y?5FhB?G`q\7Ln:x[qFFf*Lmr<6q%MLxpoZ^oqJ4aqB@J}LkI [pTr0]d7 q5ZCvyIiFrI[cK[KZFK[ KQA[Yy[(q)clAIYZuHaFH`r[ܟF[5<\Eqaoϖ6uLE>E^Lp[{DH[ErmGz{KX rpYJwzZLY` pqEE*7<[PI(5I7ZqHJ3F6BrG6[FlES[H[|I-r Z}W[Խ@TR[69)Yi@Lz~JqrguX5pm|@e[bF[ZF7Gpr[ȠYsY/CZ7pJ`X A,[>H\6['Lj@CmkC@ZG"q.1TI(@BD!DLDOpKr_ZLEH5q~Z{q?[9؛paG(ZyZ[5HJ|+[4e@~GGҏH$YZF"YY[bEZRG3|G [rZFpr9=pNZ[r~6ZCfLrO,[tϰL*dz&kZ]KnLLjL0F(rgrGێ[ K[FYHITz9}r[֜Pb6poFp!6q3JbQ:&<}I7[bYGpW[ss d>KUGr\Z'Ip~O:8l[`x7II;$r[KZ%EKDG[6Y@8opZ=BLFpqn[FDD=qrZ|BooHJiDYIJErG}K6%Z*=G7[JvsX&s@Z^o[C6qX`JxPYǦDypKqZY]K;/DFqRrE<[DčJ\=2T lKV8S9ZLr6;pi9wX|>)5Zn:pq [poK:!GF]K oFrWoVqUrdFFJZpFpK{@N\%![fg6Z}IkJA%F:qCqzYHq[O$ozY[2t@r?,,s[[KLI^YL-qILr 7[FqCFDƜ+4[q~Gy@}79q-E)9mqGҢJX ErH[[[Y8iZuHK'@[ >@moi8dG[EͳDjpYoo /@0[[H{[cAY:K-o=8kqZEr-=r:|@@tq$D+LJpOrD>HYroG5rD>FH+<yL;*J(G9<[sY<CEFDZuwqLJAY7IpҾGY+pJ;08R@opeZoi@TY KNqZ[LqJ]XGGJl}@DGq?YjrLhZyHze[(&X2F6D or JpEV\ L0d[tjGqnYPKL&(Z pz%[[a;Fx8pGF[HQrr=dIr|G=+qqp}op/Yir7.?fq!=GQpV=<d=Cq=F6L>I}q{-pp}[oLg2HՍq F2p2D@YeH <Nr1HUrLjbZ*bqU9<&JfoLiD~"p)i}?WDi6[Ʉη/+TQ{tQ03k&]NA
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/rendering/opengl/rasterization_backend.py
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenGL rasterization backend for TF Graphics."""

import tensorflow as tf

from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape

# pylint: disable=g-import-not-at-top
try:
  from tensorflow_graphics.rendering.opengl import gen_rasterizer_op as render_ops
except ImportError:
  import os
  dir_path = os.path.dirname(os.path.abspath(__file__))
  render_ops = tf.load_op_library(os.path.join(dir_path, "rasterizer_op.so"))
# pylint: enable=g-import-not-at-top


def _dim_value(dim):
  return 1 if dim is None else tf.compat.v1.dimension_value(dim)


# Empty vertex shader; all the work happens in the geometry shader.
vertex_shader = """
#version 430
void main() { }
"""

# Geometry shader that projects the vertices of visible triangles onto the
# image plane.
geometry_shader = """
#version 430

uniform mat4 view_projection_matrix;

layout(points) in;
layout(triangle_strip, max_vertices=3) out;

out layout(location = 0) vec2 barycentric_coordinates;
out layout(location = 1) float triangle_index;

layout(binding=0) buffer triangular_mesh { float mesh_buffer[]; };

vec3 get_vertex_position(int vertex_index) {
  // Triangles are packed as 3 consecutive vertices, each with 3 coordinates.
  int offset = gl_PrimitiveIDIn * 9 + vertex_index * 3;
  return vec3(mesh_buffer[offset], mesh_buffer[offset + 1],
              mesh_buffer[offset + 2]);
}

void main() {
  vec3 positions[3] = {get_vertex_position(0), get_vertex_position(1),
                       get_vertex_position(2)};
  vec4 projected_vertices[3] = {
      view_projection_matrix * vec4(positions[0], 1.0),
      view_projection_matrix * vec4(positions[1], 1.0),
      view_projection_matrix * vec4(positions[2], 1.0)};

  for (int i = 0; i < 3; ++i) {
    // gl_Position is a pre-defined size 4 output variable.
    gl_Position = projected_vertices[i];
    barycentric_coordinates = vec2(i==0 ? 1.0 : 0.0, i==1 ? 1.0 : 0.0);
    triangle_index = gl_PrimitiveIDIn;

    EmitVertex();
  }
  EndPrimitive();
}
"""

# Fragment shader that packs barycentric coordinates, and triangle index.
fragment_shader = """
#version 430

in layout(location = 0) vec2 barycentric_coordinates;
in layout(location = 1) float triangle_index;

out vec4 output_color;

void main() {
  output_color = vec4(round(triangle_index + 1.0), barycentric_coordinates,
                      1.0);
}
"""


def rasterize(vertices,
              triangles,
              view_projection_matrices,
              image_size,
              name=None):
  """Rasterizes the scene.

  This rasterizer estimates which triangle is associated with each pixel using
  OpenGL.

  Note:
    In the following, A1 to An are optional batch dimensions which must be
    broadcast compatible for inputs `vertices` and `view_projection_matrices`.

  Args:
    vertices: A tensor of shape `[A1, ..., An, V, 3]` containing batches of `V`
      vertices, each defined by a 3D point.
    triangles: A tensor of shape `[T, 3]` containing `T` triangles, each
      associated with 3 vertices from `scene_vertices`.
    view_projection_matrices: A tensor of shape `[A1, ..., An, 4, 4]` containing
      batches of view projection matrices.
    image_size: A tuple of integers (width, height) containing the dimensions
      in pixels of the rasterized image.
    name: A name for this op. Defaults to 'rasterization_backend_rasterize'.

  Returns:
    A tuple of 3 elements. The first one, of shape `[A1, ..., An, H, W, 1]`,
    represents the triangle index associated with each pixel. If no triangle is
    associated with a pixel, the index is set to -1. The second element in the
    tuple is of shape `[A1, ..., An, H, W, 3]` and corresponds to barycentric
    coordinates per pixel. The last element in the tuple is of shape
    `[A1, ..., An, H, W, 1]` and stores a value of `0` if the pixel is
    associated with the background, and `1` with the foreground.
  """
  with tf.compat.v1.name_scope(name, "rasterization_backend_rasterize",
                               (vertices, triangles, view_projection_matrices)):
    vertices = tf.convert_to_tensor(value=vertices)
    triangles = tf.convert_to_tensor(value=triangles)
    view_projection_matrices = tf.convert_to_tensor(
        value=view_projection_matrices)

    shape.check_static(
        tensor=vertices,
        tensor_name="vertices",
        has_rank_greater_than=1,
        has_dim_equals=((-1, 3)))
    shape.check_static(
        tensor=triangles,
        tensor_name="triangles",
        has_rank=2,
        has_dim_equals=((-1, 3)))
    shape.check_static(
        tensor=view_projection_matrices,
        tensor_name="view_projection_matrices",
        has_rank_greater_than=1,
        has_dim_equals=((-1, 4), (-2, 4)))
    shape.compare_batch_dimensions(
        tensors=(vertices, view_projection_matrices),
        tensor_names=("vertices", "view_projection_matrices"),
        last_axes=(-3, -3),
        broadcast_compatible=True)

    common_batch_shape = shape.get_broadcasted_shape(
        vertices.shape[:-2], view_projection_matrices.shape[:-2])
    common_batch_shape = [_dim_value(dim) for dim in common_batch_shape]
    vertices = tf.broadcast_to(vertices,
                               common_batch_shape + vertices.shape[-2:])
    view_projection_matrices = tf.broadcast_to(view_projection_matrices,
                                               common_batch_shape + [4, 4])

    geometry = tf.gather(vertices, triangles, axis=-2)

    rasterized = render_ops.rasterize(
        num_points=geometry.shape[-3],
        alpha_clear=0.0,
        enable_cull_face=True,
        variable_names=("view_projection_matrix", "triangular_mesh"),
        variable_kinds=("mat", "buffer"),
        variable_values=(view_projection_matrices,
                         tf.reshape(geometry, shape=common_batch_shape + [-1])),
        output_resolution=image_size,
        vertex_shader=vertex_shader,
        geometry_shader=geometry_shader,
        fragment_shader=fragment_shader)

    triangle_index = tf.cast(rasterized[..., 0], tf.int32) - 1
    # Slicing of the tensor will result in all batch dimensions being
    # `None` for tensorflow graph mode, therefore we have to fix it in order to
    # have explicit shape.
    width, height = image_size
    triangle_index = tf.reshape(triangle_index,
                                common_batch_shape + [height, width, 1])
    barycentric_coordinates = rasterized[..., 1:3]
    barycentric_coordinates = tf.concat(
        (barycentric_coordinates, 1.0 - barycentric_coordinates[..., 0:1] -
         barycentric_coordinates[..., 1:2]),
        axis=-1)
    mask = tf.cast(rasterized[..., 3], tf.int32)
    mask = tf.reshape(mask, common_batch_shape + [height, width, 1])

    return triangle_index, barycentric_coordinates, mask


# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
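The before_content above returns a bare 3-tuple. Below is a minimal sketch of driving that version of the API, assuming the compiled OpenGL rasterizer op is available; the single triangle, the identity matrix standing in for a real view-projection matrix, and the 8x4 resolution are illustration values only, not part of the PR:

import tensorflow as tf

from tensorflow_graphics.rendering.opengl import rasterization_backend

# A single camera-facing triangle at depth 1; purely illustrative geometry.
vertices = tf.constant(
    [[[-1.0, -1.0, 1.0], [1.0, -1.0, 1.0], [0.0, 1.0, 1.0]]],
    dtype=tf.float32)  # Shape [1, 3, 3]: one batch, three vertices.
triangles = tf.constant([[0, 1, 2]], dtype=tf.int32)  # Shape [1, 3].
# An identity matrix stands in for a real view-projection matrix here.
view_projection = tf.eye(4, batch_shape=[1])

triangle_index, barycentrics, mask = rasterization_backend.rasterize(
    vertices, triangles, view_projection, image_size=(8, 4))

# In this version, background pixels carry triangle index -1, so callers must
# consult the mask (or test against -1) before gathering per-triangle data.
is_foreground = tf.equal(mask, 1)

Note the three positional results: keeping them consistent at every call site is exactly the bookkeeping the Framebuffer change in the after_content removes.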
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """OpenGL rasterization backend for TF Graphics.""" import tensorflow as tf from tensorflow_graphics.rendering import framebuffer as fb from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape # pylint: disable=g-import-not-at-top try: from tensorflow_graphics.rendering.opengl import gen_rasterizer_op as render_ops except ImportError: import os dir_path = os.path.dirname(os.path.abspath(__file__)) render_ops = tf.load_op_library(os.path.join(dir_path, "rasterizer_op.so")) # pylint: enable=g-import-not-at-top def _dim_value(dim): return 1 if dim is None else tf.compat.v1.dimension_value(dim) # Empty vertex shader; all the work happens in the geometry shader. vertex_shader = """ #version 430 void main() { } """ # Geometry shader that projects the vertices of visible triangles onto the image # plane. geometry_shader = """ #version 430 uniform mat4 view_projection_matrix; layout(points) in; layout(triangle_strip, max_vertices=3) out; out layout(location = 0) vec2 barycentric_coordinates; out layout(location = 1) float triangle_index; layout(binding=0) buffer triangular_mesh { float mesh_buffer[]; }; vec3 get_vertex_position(int vertex_index) { // Triangles are packed as 3 consecuitve vertices, each with 3 coordinates. int offset = gl_PrimitiveIDIn * 9 + vertex_index * 3; return vec3(mesh_buffer[offset], mesh_buffer[offset + 1], mesh_buffer[offset + 2]); } void main() { vec3 positions[3] = {get_vertex_position(0), get_vertex_position(1), get_vertex_position(2)}; vec4 projected_vertices[3] = { view_projection_matrix * vec4(positions[0], 1.0), view_projection_matrix * vec4(positions[1], 1.0), view_projection_matrix * vec4(positions[2], 1.0)}; for (int i = 0; i < 3; ++i) { // gl_Position is a pre-defined size 4 output variable. gl_Position = projected_vertices[i]; barycentric_coordinates = vec2(i==0 ? 1.0 : 0.0, i==1 ? 1.0 : 0.0); triangle_index = gl_PrimitiveIDIn; EmitVertex(); } EndPrimitive(); } """ # Fragment shader that packs barycentric coordinates, and triangle index. fragment_shader = """ #version 430 in layout(location = 0) vec2 barycentric_coordinates; in layout(location = 1) float triangle_index; out vec4 output_color; void main() { output_color = vec4(round(triangle_index), barycentric_coordinates, 1.0); } """ def rasterize(vertices, triangles, view_projection_matrices, image_size, name=None): """Rasterizes the scene. This rasterizer estimates which triangle is associated with each pixel using OpenGL. Note: In the following, A1 to An are optional batch dimensions which must be broadcast compatible for inputs `vertices` and `view_projection_matrices`. Args: vertices: A tensor of shape `[A1, ..., An, V, 3]` containing batches of `V` vertices, each defined by a 3D point. 
triangles: A tensor of shape `[T, 3]` containing `T` triangles, each associated with 3 vertices from `scene_vertices` view_projection_matrices: A tensor of shape `[A1, ..., An, 4, 4]` containing batches of view projection matrices image_size: An tuple of integers (width, height) containing the dimensions in pixels of the rasterized image. name: A name for this op. Defaults to 'rasterization_backend_rasterize'. Returns: A Framebuffer containing the rasterized values: barycentrics, triangle_id, foreground_mask, vertex_ids. Returned Tensors have shape [batch, num_layers, height, width, channels] Note: triangle_id contains the triangle id value for each pixel in the output image. For pixels within the mesh, this is the integer value in the range [0, num_vertices] from triangles. For vertices outside the mesh this is 0; 0 can either indicate belonging to triangle 0, or being outside the mesh. This ensures all returned triangle ids will validly index into the vertex array, enabling the use of tf.gather with indices from this tensor. The barycentric coordinates can be used to determine pixel validity instead. See framebuffer.py for a description of the Framebuffer fields. """ with tf.compat.v1.name_scope(name, "rasterization_backend_rasterize", (vertices, triangles, view_projection_matrices)): vertices = tf.convert_to_tensor(value=vertices) triangles = tf.convert_to_tensor(value=triangles) view_projection_matrices = tf.convert_to_tensor( value=view_projection_matrices) shape.check_static( tensor=vertices, tensor_name="vertices", has_rank_greater_than=1, has_dim_equals=((-1, 3))) shape.check_static( tensor=triangles, tensor_name="triangles", has_rank=2, has_dim_equals=((-1, 3))) shape.check_static( tensor=view_projection_matrices, tensor_name="view_projection_matrices", has_rank_greater_than=1, has_dim_equals=((-1, 4), (-2, 4))) shape.compare_batch_dimensions( tensors=(vertices, view_projection_matrices), tensor_names=("vertices", "view_projection_matrices"), last_axes=(-3, -3), broadcast_compatible=True) common_batch_shape = shape.get_broadcasted_shape( vertices.shape[:-2], view_projection_matrices.shape[:-2]) common_batch_shape = [_dim_value(dim) for dim in common_batch_shape] vertices = tf.broadcast_to(vertices, common_batch_shape + vertices.shape[-2:]) view_projection_matrices = tf.broadcast_to(view_projection_matrices, common_batch_shape + [4, 4]) geometry = tf.gather(vertices, triangles, axis=-2) rasterized = render_ops.rasterize( num_points=geometry.shape[-3], alpha_clear=0.0, enable_cull_face=True, variable_names=("view_projection_matrix", "triangular_mesh"), variable_kinds=("mat", "buffer"), variable_values=(view_projection_matrices, tf.reshape(geometry, shape=common_batch_shape + [-1])), output_resolution=image_size, vertex_shader=vertex_shader, geometry_shader=geometry_shader, fragment_shader=fragment_shader) triangle_index = tf.cast(rasterized[..., 0], tf.int32) # Slicing of the tensor will result in all batch dimensions being # `None` for tensorflow graph mode, therefore we have to fix it in order to # have explicit shape. 
width, height = image_size triangle_index = tf.reshape(triangle_index, common_batch_shape + [height, width, 1]) barycentric_coordinates = rasterized[..., 1:3] barycentric_coordinates = tf.concat( (barycentric_coordinates, 1.0 - barycentric_coordinates[..., 0:1] - barycentric_coordinates[..., 1:2]), axis=-1) mask = tf.cast(rasterized[..., 3], tf.int32) mask = tf.reshape(mask, common_batch_shape + [height, width, 1]) triangles_batch = tf.broadcast_to(triangles, common_batch_shape + triangles.shape) vertex_ids = tf.gather( triangles_batch, triangle_index[..., 0], batch_dims=len(common_batch_shape)) return fb.Framebuffer( foreground_mask=mask, triangle_id=triangle_index, vertex_ids=vertex_ids, barycentrics=fb.RasterizedAttribute( value=barycentric_coordinates, d_dx=None, d_dy=None)) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
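To make the Framebuffer contract above concrete, here is a minimal usage sketch. It is not part of the PR: it mirrors test_rasterize_preset from the test file below, and it assumes the OpenGL rasterizer op is available on the machine.

import numpy as np
import tensorflow as tf
from tensorflow_graphics.geometry.transformation import look_at
from tensorflow_graphics.rendering.camera import perspective
from tensorflow_graphics.rendering.opengl import rasterization_backend

width, height = 7, 5
# One triangle at depth 1.0 in front of the camera.
vertices = ((-2.0, 0.0, 1.0), (0.0, 2.0, 1.0), (0.0, 0.0, 1.0))
triangles = np.array(((0, 1, 2),), np.int32)

model_to_eye = look_at.right_handed((0.0, 0.0, 0.0), (0.0, 0.0, 1.0),
                                    (0.0, 1.0, 0.0))
projection = perspective.right_handed(
    (60.0 * np.pi / 180.0,), (float(width) / float(height),), (0.01,),
    (400.0,))
view_projection = tf.linalg.matmul(projection, model_to_eye)

framebuffer = rasterization_backend.rasterize(vertices, triangles,
                                              view_projection,
                                              (width, height))
# With no batch dimensions on the inputs, every field is [height, width, C].
print(framebuffer.foreground_mask.shape)     # (5, 7, 1)
print(framebuffer.triangle_id.shape)         # (5, 7, 1)
print(framebuffer.vertex_ids.shape)          # (5, 7, 3)
print(framebuffer.barycentrics.value.shape)  # (5, 7, 3)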
1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/rendering/opengl/tests/rasterization_backend_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.geometry.representation import grid from tensorflow_graphics.geometry.transformation import look_at from tensorflow_graphics.rendering.camera import perspective from tensorflow_graphics.rendering.opengl import math as glm from tensorflow_graphics.rendering.opengl import rasterization_backend from tensorflow_graphics.util import test_case _IMAGE_HEIGHT = 5 _IMAGE_WIDTH = 7 _TRIANGLE_SIZE = 2.0 def _generate_vertices_and_view_matrices(): camera_origin = ((0.0, 0.0, 0.0), (0.0, 0.0, 0.0)) camera_up = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0)) look_at_point = ((0.0, 0.0, 1.0), (0.0, 0.0, -1.0)) field_of_view = ((60 * np.math.pi / 180,), (60 * np.math.pi / 180,)) near_plane = ((0.01,), (0.01,)) far_plane = ((400.0,), (400.0,)) aspect_ratio = ((float(_IMAGE_WIDTH) / float(_IMAGE_HEIGHT),), (float(_IMAGE_WIDTH) / float(_IMAGE_HEIGHT),)) # Construct the view projection matrix. world_to_camera = look_at.right_handed(camera_origin, look_at_point, camera_up) perspective_matrix = perspective.right_handed(field_of_view, aspect_ratio, near_plane, far_plane) view_projection_matrix = tf.linalg.matmul(perspective_matrix, world_to_camera) depth = 1.0 vertices = (((-10.0 * _TRIANGLE_SIZE, 10.0 * _TRIANGLE_SIZE, depth), (10.0 * _TRIANGLE_SIZE, 10.0 * _TRIANGLE_SIZE, depth), (0.0, -10.0 * _TRIANGLE_SIZE, depth)), ((-_TRIANGLE_SIZE, 0.0, depth), (0.0, _TRIANGLE_SIZE, depth), (0.0, 0.0, depth))) return vertices, view_projection_matrix def _proxy_rasterize(vertices, triangles, view_projection_matrices): return rasterization_backend.rasterize(vertices, triangles, view_projection_matrices, (_IMAGE_WIDTH, _IMAGE_HEIGHT)) class RasterizationBackendTest(test_case.TestCase): @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (2, 6, 32, 2), (17, 3), (2, 6, 4, 4)), ("must have exactly 3 dimensions in axis -1", (2, 6, 32, 3), (17, 2), (2, 6, 4, 4)), ("must have a rank of 2", (2, 6, 32, 3), (3, 17, 2), (2, 6, 4, 4)), ("must have exactly 4 dimensions in axis -1", (2, 6, 32, 3), (17, 3), (2, 6, 4, 3)), ("must have exactly 4 dimensions in axis -2", (2, 6, 32, 3), (17, 3), (2, 6, 3, 4)), ("Not all batch dimensions are broadcast-compatible", (3, 6, 32, 3), (17, 3), (5, 6, 4, 4)), ) def test_rasterize_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(_proxy_rasterize, error_msg, shapes) @parameterized.parameters( (((32, 3), (17, 3), (4, 4)), (tf.float32, tf.int32, tf.float32)), (((None, 32, 3), (17, 3), (None, 4, 4)), (tf.float32, tf.int32, tf.float32)), (((None, 9, 32, 3), (17, 3), (None, 9, 4, 4)), (tf.float32, tf.int32, tf.float32)), ) def test_rasterize_exception_not_raised(self, shapes, dtypes): self.assert_exception_is_not_raised( _proxy_rasterize, shapes=shapes, dtypes=dtypes) def test_rasterize_batch_vertices_only(self): triangles = np.array(((0, 
1, 2),), np.int32) vertices, view_projection_matrix = _generate_vertices_and_view_matrices() _, _, mask = rasterization_backend.rasterize(vertices, triangles, view_projection_matrix[0], (_IMAGE_WIDTH, _IMAGE_HEIGHT)) self.assertAllEqual(mask[0, ...], tf.ones_like(mask[0, ...])) gt_layer_1 = np.zeros((_IMAGE_HEIGHT, _IMAGE_WIDTH, 1), np.float32) gt_layer_1[_IMAGE_HEIGHT // 2:, _IMAGE_WIDTH // 2:, 0] = 1.0 self.assertAllEqual(mask[1, ...], gt_layer_1) def test_rasterize_batch_view_only(self): triangles = np.array(((0, 1, 2),), np.int32) vertices, view_projection_matrix = _generate_vertices_and_view_matrices() _, _, mask = rasterization_backend.rasterize(vertices[0], triangles, view_projection_matrix, (_IMAGE_WIDTH, _IMAGE_HEIGHT)) self.assertAllEqual(mask[0, ...], tf.ones_like(mask[0, ...])) self.assertAllEqual(mask[1, ...], tf.zeros_like(mask[1, ...])) def test_rasterize_preset(self): camera_origin = (0.0, 0.0, 0.0) camera_up = (0.0, 1.0, 0.0) look_at_point = (0.0, 0.0, 1.0) field_of_view = (60 * np.math.pi / 180,) near_plane = (0.01,) far_plane = (400.0,) # Construct the view projection matrix. model_to_eye_matrix = look_at.right_handed(camera_origin, look_at_point, camera_up) perspective_matrix = perspective.right_handed( field_of_view, (float(_IMAGE_WIDTH) / float(_IMAGE_HEIGHT),), near_plane, far_plane) view_projection_matrix = tf.linalg.matmul(perspective_matrix, model_to_eye_matrix) depth = 1.0 vertices = ((-2.0 * _TRIANGLE_SIZE, 0.0, depth), (0.0, _TRIANGLE_SIZE, depth), (0.0, 0.0, depth), (0.0, -_TRIANGLE_SIZE, depth)) triangles = np.array(((1, 2, 0), (0, 2, 3)), np.int32) predicted_triangle_index, predicted_barycentrics, predicted_mask = rasterization_backend.rasterize( vertices, triangles, view_projection_matrix, (_IMAGE_WIDTH, _IMAGE_HEIGHT)) with self.subTest(name="triangle_index"): groundtruth_triangle_index = np.zeros((_IMAGE_HEIGHT, _IMAGE_WIDTH, 1), dtype=np.int32) groundtruth_triangle_index[..., :_IMAGE_WIDTH // 2, 0] = -1 groundtruth_triangle_index[:_IMAGE_HEIGHT // 2, _IMAGE_WIDTH // 2:, 0] = 1 self.assertAllEqual(groundtruth_triangle_index, predicted_triangle_index) with self.subTest(name="mask"): groundtruth_mask = np.ones((_IMAGE_HEIGHT, _IMAGE_WIDTH, 1), dtype=np.int32) groundtruth_mask[..., :_IMAGE_WIDTH // 2, 0] = 0 self.assertAllEqual(groundtruth_mask, predicted_mask) attributes = np.array( ((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))).astype(np.float32) perspective_correct_interpolation = lambda geometry, pixels: glm.perspective_correct_interpolation( # pylint: disable=g-long-lambda,line-too-long geometry, attributes, pixels, model_to_eye_matrix, perspective_matrix, np.array((_IMAGE_WIDTH, _IMAGE_HEIGHT)).astype(np.float32), np.array((0.0, 0.0)).astype(np.float32)) with self.subTest(name="barycentric_coordinates_triangle_0"): geometry_0 = tf.gather(vertices, triangles[0, :]) pixels_0 = tf.transpose( grid.generate((3.5, 2.5), (6.5, 4.5), (4, 3)), perm=(1, 0, 2)) barycentrics_gt_0 = perspective_correct_interpolation( geometry_0, pixels_0) self.assertAllClose( barycentrics_gt_0, predicted_barycentrics[2:, 3:, :], atol=1e-3) with self.subTest(name="barycentric_coordinates_triangle_1"): geometry_1 = tf.gather(vertices, triangles[1, :]) pixels_1 = tf.transpose( grid.generate((3.5, 0.5), (6.5, 1.5), (4, 2)), perm=(1, 0, 2)) barycentrics_gt_1 = perspective_correct_interpolation( geometry_1, pixels_1) self.assertAllClose( barycentrics_gt_1, predicted_barycentrics[0:2, 3:, :], atol=1e-3)
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.geometry.representation import grid from tensorflow_graphics.geometry.transformation import look_at from tensorflow_graphics.rendering.camera import perspective from tensorflow_graphics.rendering.opengl import math as glm from tensorflow_graphics.rendering.opengl import rasterization_backend from tensorflow_graphics.util import test_case _IMAGE_HEIGHT = 5 _IMAGE_WIDTH = 7 _TRIANGLE_SIZE = 2.0 def _generate_vertices_and_view_matrices(): camera_origin = ((0.0, 0.0, 0.0), (0.0, 0.0, 0.0)) camera_up = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0)) look_at_point = ((0.0, 0.0, 1.0), (0.0, 0.0, -1.0)) field_of_view = ((60 * np.math.pi / 180,), (60 * np.math.pi / 180,)) near_plane = ((0.01,), (0.01,)) far_plane = ((400.0,), (400.0,)) aspect_ratio = ((float(_IMAGE_WIDTH) / float(_IMAGE_HEIGHT),), (float(_IMAGE_WIDTH) / float(_IMAGE_HEIGHT),)) # Construct the view projection matrix. world_to_camera = look_at.right_handed(camera_origin, look_at_point, camera_up) perspective_matrix = perspective.right_handed(field_of_view, aspect_ratio, near_plane, far_plane) view_projection_matrix = tf.linalg.matmul(perspective_matrix, world_to_camera) depth = 1.0 vertices = (((-10.0 * _TRIANGLE_SIZE, 10.0 * _TRIANGLE_SIZE, depth), (10.0 * _TRIANGLE_SIZE, 10.0 * _TRIANGLE_SIZE, depth), (0.0, -10.0 * _TRIANGLE_SIZE, depth)), ((-_TRIANGLE_SIZE, 0.0, depth), (0.0, _TRIANGLE_SIZE, depth), (0.0, 0.0, depth))) return vertices, view_projection_matrix def _proxy_rasterize(vertices, triangles, view_projection_matrices): return rasterization_backend.rasterize(vertices, triangles, view_projection_matrices, (_IMAGE_WIDTH, _IMAGE_HEIGHT)) class RasterizationBackendTest(test_case.TestCase): @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (2, 6, 32, 2), (17, 3), (2, 6, 4, 4)), ("must have exactly 3 dimensions in axis -1", (2, 6, 32, 3), (17, 2), (2, 6, 4, 4)), ("must have a rank of 2", (2, 6, 32, 3), (3, 17, 2), (2, 6, 4, 4)), ("must have exactly 4 dimensions in axis -1", (2, 6, 32, 3), (17, 3), (2, 6, 4, 3)), ("must have exactly 4 dimensions in axis -2", (2, 6, 32, 3), (17, 3), (2, 6, 3, 4)), ("Not all batch dimensions are broadcast-compatible", (3, 6, 32, 3), (17, 3), (5, 6, 4, 4)), ) def test_rasterize_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(_proxy_rasterize, error_msg, shapes) @parameterized.parameters( (((32, 3), (17, 3), (4, 4)), (tf.float32, tf.int32, tf.float32)), (((None, 32, 3), (17, 3), (None, 4, 4)), (tf.float32, tf.int32, tf.float32)), (((None, 9, 32, 3), (17, 3), (None, 9, 4, 4)), (tf.float32, tf.int32, tf.float32)), ) def test_rasterize_exception_not_raised(self, shapes, dtypes): self.assert_exception_is_not_raised( _proxy_rasterize, shapes=shapes, dtypes=dtypes) def test_rasterize_batch_vertices_only(self): triangles = np.array(((0, 
1, 2),), np.int32) vertices, view_projection_matrix = _generate_vertices_and_view_matrices() predicted_fb = rasterization_backend.rasterize( vertices, triangles, view_projection_matrix[0], (_IMAGE_WIDTH, _IMAGE_HEIGHT)) mask = predicted_fb.foreground_mask self.assertAllEqual(mask[0, ...], tf.ones_like(mask[0, ...])) gt_layer_1 = np.zeros((_IMAGE_HEIGHT, _IMAGE_WIDTH, 1), np.float32) gt_layer_1[_IMAGE_HEIGHT // 2:, _IMAGE_WIDTH // 2:, 0] = 1.0 self.assertAllEqual(mask[1, ...], gt_layer_1) def test_rasterize_batch_view_only(self): triangles = np.array(((0, 1, 2),), np.int32) vertices, view_projection_matrix = _generate_vertices_and_view_matrices() predicted_fb = rasterization_backend.rasterize( vertices[0], triangles, view_projection_matrix, (_IMAGE_WIDTH, _IMAGE_HEIGHT)) self.assertAllEqual(predicted_fb.foreground_mask[0, ...], tf.ones_like(predicted_fb.foreground_mask[0, ...])) self.assertAllEqual(predicted_fb.foreground_mask[1, ...], tf.zeros_like(predicted_fb.foreground_mask[1, ...])) def test_rasterize_preset(self): camera_origin = (0.0, 0.0, 0.0) camera_up = (0.0, 1.0, 0.0) look_at_point = (0.0, 0.0, 1.0) field_of_view = (60 * np.math.pi / 180,) near_plane = (0.01,) far_plane = (400.0,) # Construct the view projection matrix. model_to_eye_matrix = look_at.right_handed(camera_origin, look_at_point, camera_up) perspective_matrix = perspective.right_handed( field_of_view, (float(_IMAGE_WIDTH) / float(_IMAGE_HEIGHT),), near_plane, far_plane) view_projection_matrix = tf.linalg.matmul(perspective_matrix, model_to_eye_matrix) depth = 1.0 vertices = ((-2.0 * _TRIANGLE_SIZE, 0.0, depth), (0.0, _TRIANGLE_SIZE, depth), (0.0, 0.0, depth), (0.0, -_TRIANGLE_SIZE, depth)) triangles = np.array(((1, 2, 0), (0, 2, 3)), np.int32) predicted_fb = rasterization_backend.rasterize( vertices, triangles, view_projection_matrix, (_IMAGE_WIDTH, _IMAGE_HEIGHT)) with self.subTest(name="triangle_index"): groundtruth_triangle_index = np.zeros((_IMAGE_HEIGHT, _IMAGE_WIDTH, 1), dtype=np.int32) groundtruth_triangle_index[..., :_IMAGE_WIDTH // 2, 0] = 0 groundtruth_triangle_index[:_IMAGE_HEIGHT // 2, _IMAGE_WIDTH // 2:, 0] = 1 self.assertAllEqual(groundtruth_triangle_index, predicted_fb.triangle_id) with self.subTest(name="mask"): groundtruth_mask = np.ones((_IMAGE_HEIGHT, _IMAGE_WIDTH, 1), dtype=np.int32) groundtruth_mask[..., :_IMAGE_WIDTH // 2, 0] = 0 self.assertAllEqual(groundtruth_mask, predicted_fb.foreground_mask) attributes = np.array( ((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))).astype(np.float32) perspective_correct_interpolation = lambda geometry, pixels: glm.perspective_correct_interpolation( # pylint: disable=g-long-lambda,line-too-long geometry, attributes, pixels, model_to_eye_matrix, perspective_matrix, np.array((_IMAGE_WIDTH, _IMAGE_HEIGHT)).astype(np.float32), np.array((0.0, 0.0)).astype(np.float32)) with self.subTest(name="barycentric_coordinates_triangle_0"): geometry_0 = tf.gather(vertices, triangles[0, :]) pixels_0 = tf.transpose( grid.generate((3.5, 2.5), (6.5, 4.5), (4, 3)), perm=(1, 0, 2)) barycentrics_gt_0 = perspective_correct_interpolation( geometry_0, pixels_0) self.assertAllClose( barycentrics_gt_0, predicted_fb.barycentrics.value[2:, 3:, :], atol=1e-3) with self.subTest(name="barycentric_coordinates_triangle_1"): geometry_1 = tf.gather(vertices, triangles[1, :]) pixels_1 = tf.transpose( grid.generate((3.5, 0.5), (6.5, 1.5), (4, 2)), perm=(1, 0, 2)) barycentrics_gt_1 = perspective_correct_interpolation( geometry_1, pixels_1) self.assertAllClose( barycentrics_gt_1, 
predicted_fb.barycentrics.value[0:2, 3:, :], atol=1e-3)
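For readers scanning the diff above, the gist of the test change is the move from tuple unpacking to Framebuffer field access; a condensed sketch reusing names from the test file (the commented-out line is the pre-PR API):

triangles = np.array(((0, 1, 2),), np.int32)
vertices, view_projection_matrix = _generate_vertices_and_view_matrices()
# Before this PR: _, _, mask = rasterization_backend.rasterize(...)
predicted_fb = rasterization_backend.rasterize(
    vertices, triangles, view_projection_matrix,
    (_IMAGE_WIDTH, _IMAGE_HEIGHT))
mask = predicted_fb.foreground_mask  # previously the third tuple element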
1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/rendering/rasterization_backend.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Rasterization backends selector for TF Graphics.""" import enum from tensorflow_graphics.rendering.opengl import rasterization_backend as gl_backend from tensorflow_graphics.util import export_api class RasterizationBackends(enum.Enum): OPENGL = 0 _BACKENDS = { RasterizationBackends.OPENGL: gl_backend, } def rasterize(vertices, triangles, view_projection_matrices, image_size, backend=RasterizationBackends.OPENGL): """Rasterizes the scene. This rasterizer estimates which triangle is associated with each pixel using OpenGL. Note: In the following, A1 to An are optional batch dimensions which must be broadcast compatible for inputs `vertices` and `view_projection_matrices`. Args: vertices: A tensor of shape `[A1, ..., An, V, 3]` containing batches of `V` vertices, each defined by a 3D point. triangles: A tensor of shape `[T, 3]` containing `T` triangles, each associated with 3 vertices from `vertices`. view_projection_matrices: A tensor of shape `[A1, ..., An, 4, 4]` containing batches of view projection matrices. image_size: A tuple of integers (width, height) containing the dimensions in pixels of the rasterized image. backend: An enum containing the backend method to use for rasterization. Supported options are defined in the RasterizationBackends enum. Returns: A tuple of 3 elements. The first one, of shape `[A1, ..., An, H, W, 1]`, represents the triangle index associated with each pixel. If no triangle is associated with a pixel, the index is set to -1. The second element in the tuple is of shape `[A1, ..., An, H, W, 3]` and corresponds to the barycentric coordinates per pixel. The last element in the tuple is of shape `[A1, ..., An, H, W]` and stores a value of `0` if the pixel is associated with the background, and `1` with the foreground. """ return _BACKENDS[backend].rasterize(vertices, triangles, view_projection_matrices, image_size) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Rasterization backends selector for TF Graphics.""" import enum from tensorflow_graphics.rendering.opengl import rasterization_backend as gl_backend from tensorflow_graphics.util import export_api class RasterizationBackends(enum.Enum): OPENGL = 0 _BACKENDS = { RasterizationBackends.OPENGL: gl_backend, } def rasterize(vertices, triangles, view_projection_matrices, image_size, backend=RasterizationBackends.OPENGL): """Rasterizes the scene. This rasterizer estimates which triangle is associated with each pixel using OpenGL. Note: In the following, A1 to An are optional batch dimensions which must be broadcast compatible for inputs `vertices` and `view_projection_matrices`. Args: vertices: A tensor of shape `[A1, ..., An, V, 3]` containing batches of `V` vertices, each defined by a 3D point. triangles: A tensor of shape `[T, 3]` containing `T` triangles, each associated with 3 vertices from `vertices`. view_projection_matrices: A tensor of shape `[A1, ..., An, 4, 4]` containing batches of view projection matrices. image_size: A tuple of integers (width, height) containing the dimensions in pixels of the rasterized image. backend: An enum containing the backend method to use for rasterization. Supported options are defined in the RasterizationBackends enum. Returns: A Framebuffer containing the rasterized values: barycentrics, triangle_id, foreground_mask, vertex_ids. Returned Tensors have shape [batch, num_layers, height, width, channels]. Note: triangle_id contains the triangle id value for each pixel in the output image. For pixels within the mesh, this is the integer index of the triangle in `triangles`, in the range [0, num_triangles). For pixels outside the mesh this is 0; a 0 can therefore indicate either triangle 0 or the background. This ensures all returned triangle ids validly index into `triangles`, enabling the use of tf.gather with indices from this tensor. The barycentric coordinates can be used to determine pixel validity instead. See framebuffer.py for a description of the Framebuffer fields. """ return _BACKENDS[backend].rasterize(vertices, triangles, view_projection_matrices, image_size) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
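A short sketch of the selector in use; the explicit backend argument is redundant here (OPENGL is the default) but shows the intended call shape. The inputs vertices, triangles and view_projection are assumed to be defined as in the OpenGL example earlier in this record set.

from tensorflow_graphics.rendering import rasterization_backend

framebuffer = rasterization_backend.rasterize(
    vertices, triangles, view_projection, (width, height),
    backend=rasterization_backend.RasterizationBackends.OPENGL)
mask = framebuffer.foreground_mask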
1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/rendering/triangle_rasterizer.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements a differentiable rasterizer of triangular meshes. The resulting rendering contains perspective-correct interpolation of attributes defined at the vertices of the rasterized meshes. This rasterizer does not provide gradients through visibility, but it does through visible geometry and attributes. """ import tensorflow as tf from tensorflow_graphics.rendering import rasterization_backend from tensorflow_graphics.rendering.opengl import math as glm from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def _perspective_correct_barycentrics(vertices_per_pixel, model_to_eye_matrix, perspective_matrix, image_size_float): """Creates the pixels grid and computes barycentrics.""" # Construct the pixel grid with half-integer pixel centers. width = image_size_float[1] height = image_size_float[0] px = tf.linspace(0.5, width - 0.5, num=int(width)) py = tf.linspace(0.5, height - 0.5, num=int(height)) xv, yv = tf.meshgrid(px, py) pixel_position = tf.stack((xv, yv), axis=-1) return glm.perspective_correct_barycentrics(vertices_per_pixel, pixel_position, model_to_eye_matrix, perspective_matrix, (width, height)) def _perspective_correct_attributes(attribute, barycentrics, triangles, triangle_index, len_batch_shape): attribute = tf.gather(attribute, triangles, axis=-2) attribute_per_pixel = tf.gather( attribute, triangle_index, axis=-3, batch_dims=len_batch_shape) return glm.interpolate_attributes(attribute_per_pixel, barycentrics) def _dim_value(dim): return 1 if dim is None else tf.compat.v1.dimension_value(dim) def rasterize(vertices, triangles, attributes, model_to_eye_matrix, perspective_matrix, image_size, backend=rasterization_backend.RasterizationBackends.OPENGL, name=None): """Rasterizes the scene. Note: In the following, A1 to An are optional batch dimensions. Args: vertices: A tensor of shape `[A1, ..., An, V, 3]` containing batches of `V` vertices, each defined by a 3D point. triangles: A tensor of shape `[T, 3]` containing `T` triangles, each associated with 3 vertices from `vertices`. attributes: A dictionary of tensors, each of shape `[A1, ..., An, V, K_a]` containing batches of `V` vertices, each associated with K-dimensional attributes. K_a may vary by attribute. model_to_eye_matrix: A tensor of shape `[A1, ..., An, 4, 4]` containing batches of matrices used to transform vertices from model to eye coordinates. perspective_matrix: A tensor of shape `[A1, ..., An, 4, 4]` containing batches of matrices used to project vertices from eye to clip coordinates. image_size: A tuple (height, width) containing the dimensions in pixels of the rasterized image. backend: A rasterization_backend.RasterizationBackends enum containing the backend method to use for rasterization. name: A name for this op. Defaults to 'triangle_rasterizer_rasterize'. Returns: A dictionary. 
The key "mask" is of shape `[A1, ..., An, height, width, 1]` and stores a value of `0` of the pixel is assciated with the background, and `1` with the foreground. The key "barycentrics" is of shape `[A1, ..., An, height, width, 3]` and stores barycentric weights. Finally, the dictionary contains perspective correct interpolated attributes of shape `[A1, ..., An, height, width, K]` per entry in the `attributes` dictionary. """ with tf.compat.v1.name_scope(name, "triangle_rasterizer_rasterize", (vertices, triangles, attributes, model_to_eye_matrix, perspective_matrix)): vertices = tf.convert_to_tensor(value=vertices) triangles = tf.convert_to_tensor(value=triangles) model_to_eye_matrix = tf.convert_to_tensor(value=model_to_eye_matrix) perspective_matrix = tf.convert_to_tensor(value=perspective_matrix) shape.check_static( tensor=vertices, tensor_name="vertices", has_rank_greater_than=1, has_dim_equals=((-1, 3))) shape.check_static( tensor=triangles, tensor_name="triangles", has_rank=2, has_dim_equals=((-1, 3))) shape.check_static( tensor=model_to_eye_matrix, tensor_name="model_to_eye_matrix", has_dim_equals=(((-2, 4), (-1, 4)))) shape.check_static( tensor=perspective_matrix, tensor_name="perspective_matrix", has_dim_equals=(((-2, 4), (-1, 4)))) image_size_float = (float(image_size[0]), float(image_size[1])) image_size_backend = (int(image_size[1]), int(image_size[0])) view_projection_matrix = tf.linalg.matmul(perspective_matrix, model_to_eye_matrix) triangle_index, _, mask = rasterization_backend.rasterize( vertices, triangles, view_projection_matrix, image_size_backend, backend) outputs = {"mask": mask, "triangle_indices": triangle_index} vertices = tf.gather(vertices, triangles, axis=-2) # Gather does not work on negative indices, which is the case for the pixel # associated to the background. triangle_index = triangle_index * mask # Extract batch shape in order to make sure it is preserved after `gather` # operation. batch_shape = triangle_index.shape[:-3] batch_shape = [_dim_value(dim) for dim in batch_shape] # Remove last dimension of `triangle_index` in order to make it compatible # with gather operations. triangle_index_lean = tf.squeeze(triangle_index, axis=-1) vertices_per_pixel = tf.gather( vertices, triangle_index_lean, axis=-3, batch_dims=len(batch_shape)) barycentrics = _perspective_correct_barycentrics(vertices_per_pixel, model_to_eye_matrix, perspective_matrix, image_size_float) mask_float = tf.cast(mask, vertices.dtype) outputs["barycentrics"] = mask_float * barycentrics for key, attribute in attributes.items(): attribute = tf.convert_to_tensor(value=attribute) outputs[key] = mask_float * _perspective_correct_attributes( attribute, barycentrics, triangles, triangle_index_lean, len(batch_shape)) return outputs # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements a differentiable rasterizer of triangular meshes. The resulting rendering contains perspective-correct interpolation of attributes defined at the vertices of the rasterized meshes. This rasterizer does not provide gradients through visibility, but it does through visible geometry and attributes. """ import tensorflow as tf from tensorflow_graphics.rendering import rasterization_backend from tensorflow_graphics.rendering.opengl import math as glm from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def _perspective_correct_barycentrics(vertices_per_pixel, model_to_eye_matrix, perspective_matrix, image_size_float): """Creates the pixels grid and computes barycentrics.""" # Construct the pixel grid with half-integer pixel centers. width = image_size_float[1] height = image_size_float[0] px = tf.linspace(0.5, width - 0.5, num=int(width)) py = tf.linspace(0.5, height - 0.5, num=int(height)) xv, yv = tf.meshgrid(px, py) pixel_position = tf.stack((xv, yv), axis=-1) return glm.perspective_correct_barycentrics(vertices_per_pixel, pixel_position, model_to_eye_matrix, perspective_matrix, (width, height)) def _perspective_correct_attributes(attribute, barycentrics, triangles, triangle_index, len_batch_shape): attribute = tf.gather(attribute, triangles, axis=-2) attribute_per_pixel = tf.gather( attribute, triangle_index, axis=-3, batch_dims=len_batch_shape) return glm.interpolate_attributes(attribute_per_pixel, barycentrics) def _dim_value(dim): return 1 if dim is None else tf.compat.v1.dimension_value(dim) def rasterize(vertices, triangles, attributes, model_to_eye_matrix, perspective_matrix, image_size, backend=rasterization_backend.RasterizationBackends.OPENGL, name=None): """Rasterizes the scene. Note: In the following, A1 to An are optional batch dimensions. Args: vertices: A tensor of shape `[A1, ..., An, V, 3]` containing batches of `V` vertices, each defined by a 3D point. triangles: A tensor of shape `[T, 3]` containing `T` triangles, each associated with 3 vertices from `vertices`. attributes: A dictionary of tensors, each of shape `[A1, ..., An, V, K_a]` containing batches of `V` vertices, each associated with K-dimensional attributes. K_a may vary by attribute. model_to_eye_matrix: A tensor of shape `[A1, ..., An, 4, 4]` containing batches of matrices used to transform vertices from model to eye coordinates. perspective_matrix: A tensor of shape `[A1, ..., An, 4, 4]` containing batches of matrices used to project vertices from eye to clip coordinates. image_size: A tuple (height, width) containing the dimensions in pixels of the rasterized image. backend: A rasterization_backend.RasterizationBackends enum containing the backend method to use for rasterization. name: A name for this op. Defaults to 'triangle_rasterizer_rasterize'. Returns: A dictionary. 
The key "mask" is of shape `[A1, ..., An, height, width, 1]` and stores a value of `0` of the pixel is assciated with the background, and `1` with the foreground. The key "barycentrics" is of shape `[A1, ..., An, height, width, 3]` and stores barycentric weights. Finally, the dictionary contains perspective correct interpolated attributes of shape `[A1, ..., An, height, width, K]` per entry in the `attributes` dictionary. """ with tf.compat.v1.name_scope(name, "triangle_rasterizer_rasterize", (vertices, triangles, attributes, model_to_eye_matrix, perspective_matrix)): vertices = tf.convert_to_tensor(value=vertices) triangles = tf.convert_to_tensor(value=triangles) model_to_eye_matrix = tf.convert_to_tensor(value=model_to_eye_matrix) perspective_matrix = tf.convert_to_tensor(value=perspective_matrix) shape.check_static( tensor=vertices, tensor_name="vertices", has_rank_greater_than=1, has_dim_equals=((-1, 3))) shape.check_static( tensor=triangles, tensor_name="triangles", has_rank=2, has_dim_equals=((-1, 3))) shape.check_static( tensor=model_to_eye_matrix, tensor_name="model_to_eye_matrix", has_dim_equals=(((-2, 4), (-1, 4)))) shape.check_static( tensor=perspective_matrix, tensor_name="perspective_matrix", has_dim_equals=(((-2, 4), (-1, 4)))) image_size_float = (float(image_size[0]), float(image_size[1])) image_size_backend = (int(image_size[1]), int(image_size[0])) view_projection_matrix = tf.linalg.matmul(perspective_matrix, model_to_eye_matrix) rasterized = rasterization_backend.rasterize(vertices, triangles, view_projection_matrix, image_size_backend, backend) outputs = { "mask": rasterized.foreground_mask, "triangle_indices": rasterized.triangle_id } # Extract batch shape in order to make sure it is preserved after `gather` # operation. batch_shape = rasterized.triangle_id.shape[:-3] batch_shape = [_dim_value(dim) for dim in batch_shape] vertices_per_pixel = tf.gather( vertices, rasterized.vertex_ids, batch_dims=len(batch_shape)) barycentrics = _perspective_correct_barycentrics(vertices_per_pixel, model_to_eye_matrix, perspective_matrix, image_size_float) mask_float = tf.cast(rasterized.foreground_mask, vertices.dtype) outputs["barycentrics"] = mask_float * barycentrics for key, attribute in attributes.items(): attribute = tf.convert_to_tensor(value=attribute) outputs[key] = mask_float * _perspective_correct_attributes( attribute, barycentrics, triangles, rasterized.triangle_id[..., 0], len(batch_shape)) return outputs # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/nn/metric/precision.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements the precision metric.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape def _cast_to_int(prediction): return tf.cast(x=prediction, dtype=tf.int32) def evaluate(ground_truth, prediction, classes=None, reduce_average=True, prediction_to_category_function=_cast_to_int, name=None): """Computes the precision metric for the given ground truth and predictions. Note: In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: ground_truth: A tensor of shape `[A1, ..., An, N]`, where the last axis represents the ground truth labels. Will be cast to int32. prediction: A tensor of shape `[A1, ..., An, N]`, where the last axis represents the predictions (which can be continuous). classes: An integer or a list/tuple of integers representing the classes for which the precision will be evaluated. In case 'classes' is 'None', the number of classes will be inferred from the given labels and the precision will be calculated for each of the classes. Defaults to 'None'. reduce_average: Whether to calculate the average of the precision for each class and return a single precision value. Defaults to true. prediction_to_category_function: A function to associate a `prediction` to a category. Defaults to rounding down the value of the prediction to the nearest integer value. name: A name for this op. Defaults to "precision_evaluate". Returns: A tensor of shape `[A1, ..., An, C]`, where the last axis represents the precision calculated for each of the requested classes. Raises: ValueError: if the shape of `ground_truth`, `prediction` is not supported. """ with tf.compat.v1.name_scope(name, "precision_evaluate", [ground_truth, prediction]): ground_truth = tf.cast( x=tf.convert_to_tensor(value=ground_truth), dtype=tf.int32) prediction = tf.convert_to_tensor(value=prediction) shape.compare_batch_dimensions( tensors=(ground_truth, prediction), tensor_names=("ground_truth", "prediction"), last_axes=-1, broadcast_compatible=True) prediction = prediction_to_category_function(prediction) if classes is None: num_classes = tf.math.maximum( tf.math.reduce_max(input_tensor=ground_truth), tf.math.reduce_max(input_tensor=prediction)) + 1 classes = tf.range(num_classes) else: classes = tf.convert_to_tensor(value=classes) # Make sure classes is a tensor of rank 1. classes = tf.reshape(classes, [1]) if tf.rank(classes) == 0 else classes # Create a confusion matrix for each of the classes (with dimensions # [A1, ..., An, C, N]). classes = tf.expand_dims(classes, -1) ground_truth_per_class = tf.equal(tf.expand_dims(ground_truth, -2), classes) prediction_per_class = tf.equal(tf.expand_dims(prediction, -2), classes) # Calculate the precision for each of the classes. 
true_positives = tf.math.reduce_sum( input_tensor=tf.cast( x=tf.math.logical_and(ground_truth_per_class, prediction_per_class), dtype=tf.float32), axis=-1) total_predicted_positives = tf.math.reduce_sum( input_tensor=tf.cast(x=prediction_per_class, dtype=tf.float32), axis=-1) precision_per_class = safe_ops.safe_signed_div(true_positives, total_predicted_positives) if reduce_average: return tf.math.reduce_mean(input_tensor=precision_per_class, axis=-1) else: return precision_per_class # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements the precision metric.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape def _cast_to_int(prediction): return tf.cast(x=prediction, dtype=tf.int32) def evaluate(ground_truth, prediction, classes=None, reduce_average=True, prediction_to_category_function=_cast_to_int, name=None): """Computes the precision metric for the given ground truth and predictions. Note: In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: ground_truth: A tensor of shape `[A1, ..., An, N]`, where the last axis represents the ground truth labels. Will be cast to int32. prediction: A tensor of shape `[A1, ..., An, N]`, where the last axis represents the predictions (which can be continuous). classes: An integer or a list/tuple of integers representing the classes for which the precision will be evaluated. In case 'classes' is 'None', the number of classes will be inferred from the given labels and the precision will be calculated for each of the classes. Defaults to 'None'. reduce_average: Whether to calculate the average of the precision for each class and return a single precision value. Defaults to true. prediction_to_category_function: A function to associate a `prediction` to a category. Defaults to rounding down the value of the prediction to the nearest integer value. name: A name for this op. Defaults to "precision_evaluate". Returns: A tensor of shape `[A1, ..., An, C]`, where the last axis represents the precision calculated for each of the requested classes. Raises: ValueError: if the shape of `ground_truth`, `prediction` is not supported. """ with tf.compat.v1.name_scope(name, "precision_evaluate", [ground_truth, prediction]): ground_truth = tf.cast( x=tf.convert_to_tensor(value=ground_truth), dtype=tf.int32) prediction = tf.convert_to_tensor(value=prediction) shape.compare_batch_dimensions( tensors=(ground_truth, prediction), tensor_names=("ground_truth", "prediction"), last_axes=-1, broadcast_compatible=True) prediction = prediction_to_category_function(prediction) if classes is None: num_classes = tf.math.maximum( tf.math.reduce_max(input_tensor=ground_truth), tf.math.reduce_max(input_tensor=prediction)) + 1 classes = tf.range(num_classes) else: classes = tf.convert_to_tensor(value=classes) # Make sure classes is a tensor of rank 1. classes = tf.reshape(classes, [1]) if tf.rank(classes) == 0 else classes # Create a confusion matrix for each of the classes (with dimensions # [A1, ..., An, C, N]). classes = tf.expand_dims(classes, -1) ground_truth_per_class = tf.equal(tf.expand_dims(ground_truth, -2), classes) prediction_per_class = tf.equal(tf.expand_dims(prediction, -2), classes) # Calculate the precision for each of the classes. 
true_positives = tf.math.reduce_sum( input_tensor=tf.cast( x=tf.math.logical_and(ground_truth_per_class, prediction_per_class), dtype=tf.float32), axis=-1) total_predicted_positives = tf.math.reduce_sum( input_tensor=tf.cast(x=prediction_per_class, dtype=tf.float32), axis=-1) precision_per_class = safe_ops.safe_signed_div(true_positives, total_predicted_positives) if reduce_average: return tf.math.reduce_mean(input_tensor=precision_per_class, axis=-1) else: return precision_per_class # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
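A tiny worked example of the metric above, with numbers chosen so the per-class values are easy to verify by hand:

import tensorflow as tf
from tensorflow_graphics.nn.metric import precision

ground_truth = tf.constant((1, 1, 0, 0))
prediction = tf.constant((1, 0, 0, 1))
# Class 0 is predicted at indices 1 and 2; only index 2 is truly 0 -> 1/2.
# Class 1 is predicted at indices 0 and 3; only index 0 is truly 1 -> 1/2.
per_class = precision.evaluate(ground_truth, prediction, reduce_average=False)
print(per_class)  # [0.5, 0.5]
print(precision.evaluate(ground_truth, prediction))  # 0.5, the class average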
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/util/tests/asserts_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for asserts.""" from absl.testing import flagsaver from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.util import asserts from tensorflow_graphics.util import test_case def _pick_random_vector(): """Creates a random vector with a random shape.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() return np.random.normal(size=tensor_shape + [4]) class AssertsTest(test_case.TestCase): @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_normalized_exception_not_raised(self, dtype): """Checks that assert_normalized raises no exceptions for valid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) norm_vector = vector / tf.norm(tensor=vector, axis=-1, keepdims=True) self.assert_exception_is_not_raised( asserts.assert_normalized, shapes=[], vector=norm_vector) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_normalized_exception_raised(self, dtype): """Checks that assert_normalized raises exceptions for invalid input.""" vector = _pick_random_vector() + 10.0 vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = tf.abs(vector) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(asserts.assert_normalized(vector)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_normalized_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_normalized(vector_input) self.assertIs(vector_input, vector_output) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_at_least_k_non_zero_entries_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_at_least_k_non_zero_entries(vector_input) self.assertIs(vector_input, vector_output) @parameterized.parameters( (None, None), (1e-3, tf.float16), (4e-19, tf.float32), (4e-154, tf.float64), ) def test_assert_nonzero_norm_exception_not_raised(self, value, dtype): """Checks that assert_nonzero_norm works for values above eps.""" if value is None: vector = _pick_random_vector() + 10.0 vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = tf.abs(vector) else: vector = tf.constant((value,), dtype=dtype) self.assert_exception_is_not_raised( asserts.assert_nonzero_norm, shapes=[], vector=vector) @parameterized.parameters( (1e-4, tf.float16), (1e-38, tf.float32), (1e-308, tf.float64), ) def test_assert_nonzero_norm_exception_raised(self, value, dtype): """Checks that assert_nonzero_norm fails for values below eps.""" vector = tf.constant((value,), dtype=dtype) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(asserts.assert_nonzero_norm(vector)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def 
test_assert_nonzero_norm_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_nonzero_norm(vector_input) self.assertIs(vector_input, vector_output) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_all_above_exception_not_raised(self, dtype): """Checks that assert_all_above raises no exceptions for valid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector * vector vector /= -tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True) eps = asserts.select_eps_for_addition(dtype) inside_vector = vector + eps ones_vector = -tf.ones_like(vector) with self.subTest(name="inside_and_open_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_above, shapes=[], vector=inside_vector, minval=-1.0, open_bound=True) with self.subTest(name="inside_and_close_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_above, shapes=[], vector=inside_vector, minval=-1.0, open_bound=False) with self.subTest(name="exact_and_close_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_above, shapes=[], vector=ones_vector, minval=-1.0, open_bound=False) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_all_above_exception_raised(self, dtype): """Checks that assert_all_above raises exceptions for invalid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector * vector vector /= -tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True) eps = asserts.select_eps_for_addition(dtype) outside_vector = vector - eps ones_vector = -tf.ones_like(vector) with self.subTest(name="outside_and_open_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_above(outside_vector, -1.0, open_bound=True)) with self.subTest(name="outside_and_close_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_above(outside_vector, -1.0, open_bound=False)) with self.subTest(name="exact_and_open_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_above(ones_vector, -1.0, open_bound=True)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_all_above_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_all_above(vector_input, 1.0) self.assertIs(vector_input, vector_output) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_all_below_exception_not_raised(self, dtype): """Checks that assert_all_below raises no exceptions for valid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector * vector vector /= tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True) eps = asserts.select_eps_for_addition(dtype) inside_vector = vector - eps ones_vector = tf.ones_like(vector) with self.subTest(name="inside_and_open_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_below, shapes=[], vector=inside_vector, maxval=1.0, open_bound=True) with self.subTest(name="inside_and_close_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_below, shapes=[], vector=inside_vector, maxval=1.0, open_bound=False) with self.subTest(name="exact_and_close_bounds"): self.assert_exception_is_not_raised( 
asserts.assert_all_below, shapes=[], vector=ones_vector, maxval=1.0, open_bound=False) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_all_below_exception_raised(self, dtype): """Checks that assert_all_below raises exceptions for invalid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector * vector vector /= tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True) eps = asserts.select_eps_for_addition(dtype) outside_vector = vector + eps ones_vector = tf.ones_like(vector) with self.subTest(name="outside_and_open_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_below(outside_vector, 1.0, open_bound=True)) with self.subTest(name="outside_and_close_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_below(outside_vector, 1.0, open_bound=False)) with self.subTest(name="exact_and_open_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_below(ones_vector, 1.0, open_bound=True)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_all_below_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_all_below(vector_input, 0.0) self.assertIs(vector_input, vector_output) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_all_in_range_exception_not_raised(self, dtype): """Checks that assert_all_in_range raises no exceptions for valid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector * vector vector /= tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True) eps = asserts.select_eps_for_addition(dtype) inside_vector = vector - eps ones_vector = tf.ones_like(vector) with self.subTest(name="inside_and_open_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_in_range, shapes=[], vector=inside_vector, minval=-1.0, maxval=1.0, open_bounds=True) with self.subTest(name="inside_and_close_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_in_range, shapes=[], vector=inside_vector, minval=-1.0, maxval=1.0, open_bounds=False) with self.subTest(name="exact_and_close_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_in_range, shapes=[], vector=ones_vector, minval=-1.0, maxval=1.0, open_bounds=False) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_all_in_range_exception_raised(self, dtype): """Checks that assert_all_in_range raises exceptions for invalid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector * vector vector /= tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True) eps = asserts.select_eps_for_addition(dtype) outside_vector = vector + eps ones_vector = tf.ones_like(vector) with self.subTest(name="outside_and_open_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_in_range( outside_vector, -1.0, 1.0, open_bounds=True)) with self.subTest(name="outside_and_close_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_in_range( outside_vector, -1.0, 1.0, open_bounds=False)) with self.subTest(name="exact_and_open_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_in_range( ones_vector, 
-1.0, 1.0, open_bounds=True)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_all_in_range_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_all_in_range(vector_input, -1.0, 1.0) self.assertIs(vector_input, vector_output) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_select_eps_for_division(self, dtype): """Checks that select_eps_for_division does not cause Inf values.""" a = tf.constant(1.0, dtype=dtype) eps = asserts.select_eps_for_division(dtype) self.assert_exception_is_not_raised( asserts.assert_no_infs_or_nans, shapes=[], tensor=a / eps) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_select_eps_for_addition(self, dtype): """Checks that select_eps_for_addition returns large enough eps.""" a = tf.constant(1.0, dtype=dtype) eps = asserts.select_eps_for_addition(dtype) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(tf.compat.v1.assert_equal(a, a + eps)) @parameterized.parameters((np.NaN,), (np.inf,)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_no_infs_or_nans_passthrough(self, value): """Checks that the assert is a passthrough when the flag is False.""" vector_input = (value,) vector_output = asserts.assert_no_infs_or_nans(vector_input) self.assertIs(vector_input, vector_output) @parameterized.parameters((np.NaN,), (np.inf,)) def test_assert_no_infs_or_nans_raises_exception_for_nan(self, value): """Checks that the assert works for `Inf` or `NaN` values.""" vector_input = (value,) with self.assertRaisesRegex( # pylint: disable=g-error-prone-assert-raises tf.errors.InvalidArgumentError, "Inf or NaN detected."): self.evaluate(asserts.assert_no_infs_or_nans(vector_input)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_binary_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_binary(vector_input) self.assertIs(vector_input, vector_output) # pylint: disable=g-error-prone-assert-raises @parameterized.parameters(tf.float16, tf.float32, tf.float64, tf.int16, tf.int32, tf.int64) def test_assert_binary_exception_raised(self, dtype): """Checks that assert_binary raises exceptions for invalid input.""" tensor_size = np.random.randint(3) + 1 tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() num_elements = np.prod(tensor_shape) # Vector with all ones except for a single negative entry. vector_with_negative = np.ones(num_elements) vector_with_negative[np.random.randint(num_elements)] = -1 vector_with_negative = vector_with_negative.reshape(tensor_shape) vector_with_negative = tf.convert_to_tensor( value=vector_with_negative, dtype=dtype) # Vector with all zeros except for a single 0.5 (or 2 in case dtype=int). 
vector = np.zeros(num_elements) vector[np.random.randint(num_elements)] = 2 vector = vector.reshape(tensor_shape) vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector - tf.compat.v1.div(vector, 4) * 3 with self.subTest(name="has_negative_number"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(asserts.assert_binary(vector_with_negative)) with self.subTest(name="has_non_binary_number"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(asserts.assert_binary(vector)) @parameterized.parameters(tf.float16, tf.float32, tf.float64, tf.int16, tf.int32, tf.int64) def test_assert_binary_exception_not_raised(self, dtype): """Checks that assert_binary raises no exceptions for valid input.""" tensor_size = np.random.randint(3) + 1 tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() # Vector with random zeros and ones. vector = np.random.randint(2, size=tensor_shape) vector = tf.convert_to_tensor(value=vector, dtype=dtype) self.assert_exception_is_not_raised( asserts.assert_binary, shapes=[], tensor=vector) if __name__ == "__main__": test_case.main()
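The assert_binary tests in the snapshot above hinge on a simple predicate: a tensor is binary iff every entry is exactly 0 or exactly 1, so both a negative entry and a fractional entry must fail. A minimal NumPy sketch of that predicate — a local illustration, not the tensorflow_graphics implementation:

import numpy as np

def is_binary(tensor):
    tensor = np.asarray(tensor)
    # Binary means every entry equals exactly 0 or exactly 1.
    return bool(np.all((tensor == 0) | (tensor == 1)))

assert is_binary(np.random.randint(2, size=(4, 5)))   # zeros and ones only
assert not is_binary([0.0, 0.5, 1.0])                 # a 0.5 entry breaks it
assert not is_binary([1, -1, 0])                      # a negative entry breaks it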
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for asserts.""" from absl.testing import flagsaver from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.util import asserts from tensorflow_graphics.util import test_case def _pick_random_vector(): """Creates a random vector with a random shape.""" tensor_size = np.random.randint(3) tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() return np.random.normal(size=tensor_shape + [4]) class AssertsTest(test_case.TestCase): @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_normalized_exception_not_raised(self, dtype): """Checks that assert_normalized raises no exceptions for valid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) norm_vector = vector / tf.norm(tensor=vector, axis=-1, keepdims=True) self.assert_exception_is_not_raised( asserts.assert_normalized, shapes=[], vector=norm_vector) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_normalized_exception_raised(self, dtype): """Checks that assert_normalized raises exceptions for invalid input.""" vector = _pick_random_vector() + 10.0 vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = tf.abs(vector) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(asserts.assert_normalized(vector)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_normalized_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_normalized(vector_input) self.assertIs(vector_input, vector_output) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_at_least_k_non_zero_entries_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_at_least_k_non_zero_entries(vector_input) self.assertIs(vector_input, vector_output) @parameterized.parameters( (None, None), (1e-3, tf.float16), (4e-19, tf.float32), (4e-154, tf.float64), ) def test_assert_nonzero_norm_exception_not_raised(self, value, dtype): """Checks that assert_nonzero_norm works for values above eps.""" if value is None: vector = _pick_random_vector() + 10.0 vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = tf.abs(vector) else: vector = tf.constant((value,), dtype=dtype) self.assert_exception_is_not_raised( asserts.assert_nonzero_norm, shapes=[], vector=vector) @parameterized.parameters( (1e-4, tf.float16), (1e-38, tf.float32), (1e-308, tf.float64), ) def test_assert_nonzero_norm_exception_raised(self, value, dtype): """Checks that assert_nonzero_norm fails for values below eps.""" vector = tf.constant((value,), dtype=dtype) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(asserts.assert_nonzero_norm(vector)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def 
test_assert_nonzero_norm_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_nonzero_norm(vector_input) self.assertIs(vector_input, vector_output) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_all_above_exception_not_raised(self, dtype): """Checks that assert_all_above raises no exceptions for valid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector * vector vector /= -tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True) eps = asserts.select_eps_for_addition(dtype) inside_vector = vector + eps ones_vector = -tf.ones_like(vector) with self.subTest(name="inside_and_open_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_above, shapes=[], vector=inside_vector, minval=-1.0, open_bound=True) with self.subTest(name="inside_and_close_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_above, shapes=[], vector=inside_vector, minval=-1.0, open_bound=False) with self.subTest(name="exact_and_close_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_above, shapes=[], vector=ones_vector, minval=-1.0, open_bound=False) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_all_above_exception_raised(self, dtype): """Checks that assert_all_above raises exceptions for invalid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector * vector vector /= -tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True) eps = asserts.select_eps_for_addition(dtype) outside_vector = vector - eps ones_vector = -tf.ones_like(vector) with self.subTest(name="outside_and_open_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_above(outside_vector, -1.0, open_bound=True)) with self.subTest(name="outside_and_close_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_above(outside_vector, -1.0, open_bound=False)) with self.subTest(name="exact_and_open_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_above(ones_vector, -1.0, open_bound=True)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_all_above_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_all_above(vector_input, 1.0) self.assertIs(vector_input, vector_output) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_all_below_exception_not_raised(self, dtype): """Checks that assert_all_below raises no exceptions for valid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector * vector vector /= tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True) eps = asserts.select_eps_for_addition(dtype) inside_vector = vector - eps ones_vector = tf.ones_like(vector) with self.subTest(name="inside_and_open_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_below, shapes=[], vector=inside_vector, maxval=1.0, open_bound=True) with self.subTest(name="inside_and_close_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_below, shapes=[], vector=inside_vector, maxval=1.0, open_bound=False) with self.subTest(name="exact_and_close_bounds"): self.assert_exception_is_not_raised( 
asserts.assert_all_below, shapes=[], vector=ones_vector, maxval=1.0, open_bound=False) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_all_below_exception_raised(self, dtype): """Checks that assert_all_below raises exceptions for invalid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector * vector vector /= tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True) eps = asserts.select_eps_for_addition(dtype) outside_vector = vector + eps ones_vector = tf.ones_like(vector) with self.subTest(name="outside_and_open_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_below(outside_vector, 1.0, open_bound=True)) with self.subTest(name="outside_and_close_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_below(outside_vector, 1.0, open_bound=False)) with self.subTest(name="exact_and_open_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_below(ones_vector, 1.0, open_bound=True)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_all_below_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_all_below(vector_input, 0.0) self.assertIs(vector_input, vector_output) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_all_in_range_exception_not_raised(self, dtype): """Checks that assert_all_in_range raises no exceptions for valid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector * vector vector /= tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True) eps = asserts.select_eps_for_addition(dtype) inside_vector = vector - eps ones_vector = tf.ones_like(vector) with self.subTest(name="inside_and_open_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_in_range, shapes=[], vector=inside_vector, minval=-1.0, maxval=1.0, open_bounds=True) with self.subTest(name="inside_and_close_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_in_range, shapes=[], vector=inside_vector, minval=-1.0, maxval=1.0, open_bounds=False) with self.subTest(name="exact_and_close_bounds"): self.assert_exception_is_not_raised( asserts.assert_all_in_range, shapes=[], vector=ones_vector, minval=-1.0, maxval=1.0, open_bounds=False) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_assert_all_in_range_exception_raised(self, dtype): """Checks that assert_all_in_range raises exceptions for invalid input.""" vector = _pick_random_vector() vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector * vector vector /= tf.reduce_max(input_tensor=vector, axis=-1, keepdims=True) eps = asserts.select_eps_for_addition(dtype) outside_vector = vector + eps ones_vector = tf.ones_like(vector) with self.subTest(name="outside_and_open_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_in_range( outside_vector, -1.0, 1.0, open_bounds=True)) with self.subTest(name="outside_and_close_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_in_range( outside_vector, -1.0, 1.0, open_bounds=False)) with self.subTest(name="exact_and_open_bounds"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( asserts.assert_all_in_range( ones_vector, 
-1.0, 1.0, open_bounds=True)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_all_in_range_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_all_in_range(vector_input, -1.0, 1.0) self.assertIs(vector_input, vector_output) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_select_eps_for_division(self, dtype): """Checks that select_eps_for_division does not cause Inf values.""" a = tf.constant(1.0, dtype=dtype) eps = asserts.select_eps_for_division(dtype) self.assert_exception_is_not_raised( asserts.assert_no_infs_or_nans, shapes=[], tensor=a / eps) @parameterized.parameters(tf.float16, tf.float32, tf.float64) def test_select_eps_for_addition(self, dtype): """Checks that select_eps_for_addition returns large enough eps.""" a = tf.constant(1.0, dtype=dtype) eps = asserts.select_eps_for_addition(dtype) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(tf.compat.v1.assert_equal(a, a + eps)) @parameterized.parameters((np.NaN,), (np.inf,)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_no_infs_or_nans_passthrough(self, value): """Checks that the assert is a passthrough when the flag is False.""" vector_input = (value,) vector_output = asserts.assert_no_infs_or_nans(vector_input) self.assertIs(vector_input, vector_output) @parameterized.parameters((np.NaN,), (np.inf,)) def test_assert_no_infs_or_nans_raises_exception_for_nan(self, value): """Checks that the assert works for `Inf` or `NaN` values.""" vector_input = (value,) with self.assertRaisesRegex( # pylint: disable=g-error-prone-assert-raises tf.errors.InvalidArgumentError, "Inf or NaN detected."): self.evaluate(asserts.assert_no_infs_or_nans(vector_input)) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_assert_binary_passthrough(self): """Checks that the assert is a passthrough when the flag is False.""" vector_input = _pick_random_vector() vector_output = asserts.assert_binary(vector_input) self.assertIs(vector_input, vector_output) # pylint: disable=g-error-prone-assert-raises @parameterized.parameters(tf.float16, tf.float32, tf.float64, tf.int16, tf.int32, tf.int64) def test_assert_binary_exception_raised(self, dtype): """Checks that assert_binary raises exceptions for invalid input.""" tensor_size = np.random.randint(3) + 1 tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() num_elements = np.prod(tensor_shape) # Vector with all ones except for a single negative entry. vector_with_negative = np.ones(num_elements) vector_with_negative[np.random.randint(num_elements)] = -1 vector_with_negative = vector_with_negative.reshape(tensor_shape) vector_with_negative = tf.convert_to_tensor( value=vector_with_negative, dtype=dtype) # Vector with all zeros except for a single 0.5 (or 2 in case dtype=int). 
vector = np.zeros(num_elements) vector[np.random.randint(num_elements)] = 2 vector = vector.reshape(tensor_shape) vector = tf.convert_to_tensor(value=vector, dtype=dtype) vector = vector - tf.compat.v1.div(vector, 4) * 3 with self.subTest(name="has_negative_number"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(asserts.assert_binary(vector_with_negative)) with self.subTest(name="has_non_binary_number"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate(asserts.assert_binary(vector)) @parameterized.parameters(tf.float16, tf.float32, tf.float64, tf.int16, tf.int32, tf.int64) def test_assert_binary_exception_not_raised(self, dtype): """Checks that assert_binary raises no exceptions for valid input.""" tensor_size = np.random.randint(3) + 1 tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist() # Vector with random zeros and ones. vector = np.random.randint(2, size=tensor_shape) vector = tf.convert_to_tensor(value=vector, dtype=dtype) self.assert_exception_is_not_raised( asserts.assert_binary, shapes=[], tensor=vector) if __name__ == "__main__": test_case.main()
-1
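The asserts tests above lean on dtype-dependent eps selection: an additive eps must be large enough that 1.0 + eps is distinguishable from 1.0 in that dtype, while a divisive eps must keep 1.0 / eps finite. Below is a minimal NumPy sketch of that idea; the helper names mirror the intent of asserts.select_eps_for_addition and asserts.select_eps_for_division, but the bodies (including the 10x safety factor) are local stand-ins, not the tensorflow_graphics implementations.

import numpy as np

def select_eps_for_addition(dtype):
    # Stand-in: sit a bit above the machine epsilon, the smallest
    # representable step next to 1.0 for this dtype.
    return 10.0 * np.finfo(dtype).eps

def select_eps_for_division(dtype):
    # Stand-in: sit well above the smallest positive normal number,
    # so that 1.0 / eps cannot overflow to Inf.
    return 10.0 * np.finfo(dtype).tiny

for dtype in (np.float16, np.float32, np.float64):
    add_eps = dtype(select_eps_for_addition(dtype))
    div_eps = dtype(select_eps_for_division(dtype))
    one = dtype(1.0)
    assert one + add_eps != one          # the addition is detectable
    assert np.isfinite(one / div_eps)    # the division stays finite
    print(dtype.__name__, add_eps, div_eps)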
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/geometry/transformation/tests/axis_angle_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for axis-angle.""" from absl.testing import flagsaver from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.geometry.transformation import axis_angle from tensorflow_graphics.geometry.transformation import quaternion from tensorflow_graphics.geometry.transformation import rotation_matrix_3d from tensorflow_graphics.geometry.transformation.tests import test_helpers from tensorflow_graphics.util import test_case class AxisAngleTest(test_case.TestCase): @parameterized.parameters( ((3,),), ((None, 3),), ) def test_from_euler_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(axis_angle.from_euler, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (None,)),) def test_from_euler_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(axis_angle.from_euler, error_msg, shapes) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_from_euler_jacobian_random(self): """Test the Jacobian of the from_euler function. Note: Preset angles are not tested as the gradient of tf.norm is NaN at 0. 
""" x_init = test_helpers.generate_random_test_euler_angles() self.assert_jacobian_is_finite_fn(lambda x: axis_angle.from_euler(x)[0], [x_init]) self.assert_jacobian_is_finite_fn(lambda x: axis_angle.from_euler(x)[1], [x_init]) def test_from_euler_random(self): """Tests that from_euler allows to perform the expect rotation of points.""" random_euler_angles = test_helpers.generate_random_test_euler_angles() tensor_shape = random_euler_angles.shape[:-1] random_point = np.random.normal(size=tensor_shape + (3,)) random_matrix = rotation_matrix_3d.from_euler(random_euler_angles) random_axis, random_angle = axis_angle.from_euler(random_euler_angles) rotated_with_matrix = rotation_matrix_3d.rotate(random_point, random_matrix) rotated_with_axis_angle = axis_angle.rotate(random_point, random_axis, random_angle) self.assertAllClose(rotated_with_matrix, rotated_with_axis_angle) @parameterized.parameters( ((3,),), ((None, 3),), ((2, 3),), ) def test_from_euler_with_small_angles_approximation_exception_not_raised( self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised( axis_angle.from_euler_with_small_angles_approximation, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (None,)),) def test_from_euler_with_small_angles_approximation_exception_raised( self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised( axis_angle.from_euler_with_small_angles_approximation, error_msg, shapes) def test_from_euler_normalized_preset(self): """Tests that from_euler allows build normalized axis-angles.""" euler_angles = test_helpers.generate_preset_test_euler_angles() axis, angle = axis_angle.from_euler(euler_angles) self.assertAllEqual( axis_angle.is_normalized(axis, angle), np.ones(angle.shape, dtype=bool)) def test_from_euler_normalized_random(self): """Tests that from_euler allows build normalized axis-angles.""" random_euler_angles = test_helpers.generate_random_test_euler_angles() random_axis, random_angle = axis_angle.from_euler(random_euler_angles) self.assertAllEqual( axis_angle.is_normalized(random_axis, random_angle), np.ones(shape=random_angle.shape)) def test_from_euler_with_small_angles_approximation_random(self): # Only generate small angles. For a test tolerance of 1e-3, 0.23 was found # empirically to be the range where the small angle approximation works. random_euler_angles = test_helpers.generate_random_test_euler_angles( min_angle=-0.23, max_angle=0.23) exact_axis_angle = axis_angle.from_euler(random_euler_angles) approximate_axis_angle = ( axis_angle.from_euler_with_small_angles_approximation( random_euler_angles)) self.assertAllClose(exact_axis_angle, approximate_axis_angle, atol=1e-3) @parameterized.parameters( ((4,),), ((None, 4),), ((2, 4),), ) def test_from_quaternion_exception_not_raised(self, *shape): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(axis_angle.from_quaternion, shape) @parameterized.parameters( ("must have exactly 4 dimensions in axis -1", (None,)),) def test_from_quaternion_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised(axis_angle.from_quaternion, error_msg, shape) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_from_quaternion_jacobian_random(self): """Test the Jacobian of the from_quaternion function. Note: Preset angles are not tested as the gradient of tf.norm is NaN a 0. 
""" x_init = test_helpers.generate_random_test_quaternions() self.assert_jacobian_is_finite_fn( lambda x: axis_angle.from_quaternion(x)[0], [x_init]) self.assert_jacobian_is_finite_fn( lambda x: axis_angle.from_quaternion(x)[1], [x_init]) def test_from_quaternion_normalized_preset(self): """Tests that from_quaternion returns normalized axis-angles.""" euler_angles = test_helpers.generate_preset_test_euler_angles() quat = quaternion.from_euler(euler_angles) axis, angle = axis_angle.from_quaternion(quat) self.assertAllEqual( axis_angle.is_normalized(axis, angle), np.ones(angle.shape, dtype=bool)) def test_from_quaternion_normalized_random(self): """Tests that from_quaternion returns normalized axis-angles.""" random_quaternions = test_helpers.generate_random_test_quaternions() random_axis, random_angle = axis_angle.from_quaternion(random_quaternions) self.assertAllEqual( axis_angle.is_normalized(random_axis, random_angle), np.ones(random_angle.shape)) def test_from_quaternion_preset(self): """Tests that axis_angle.from_quaternion produces the expected result.""" preset_euler_angles = test_helpers.generate_preset_test_euler_angles() preset_quaternions = quaternion.from_euler(preset_euler_angles) preset_axis_angle = axis_angle.from_euler(preset_euler_angles) self.assertAllClose( preset_axis_angle, axis_angle.from_quaternion(preset_quaternions), rtol=1e-3) def test_from_quaternion_random(self): """Tests that axis_angle.from_quaternion produces the expected result.""" random_euler_angles = test_helpers.generate_random_test_euler_angles() random_quaternions = quaternion.from_euler(random_euler_angles) random_axis_angle = axis_angle.from_euler(random_euler_angles) self.assertAllClose( random_axis_angle, axis_angle.from_quaternion(random_quaternions), rtol=1e-3) @parameterized.parameters( ((3, 3),), ((None, 3, 3),), ) def test_from_rotation_matrix_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(axis_angle.from_rotation_matrix, shapes) @parameterized.parameters( ("must have a rank greater than 1", (3,)), ("must have exactly 3 dimensions in axis -1", (3, None)), ("must have exactly 3 dimensions in axis -2", (None, 3)), ) def test_from_rotation_matrix_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised(axis_angle.from_rotation_matrix, error_msg, shape) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_from_rotation_matrix_jacobian_random(self): """Test the Jacobian of the from_rotation_matrix function. Note: Preset angles are not tested as the gradient of tf.norm is NaN a 0. 
""" x_init = test_helpers.generate_random_test_rotation_matrix_3d() self.assert_jacobian_is_finite_fn( lambda x: axis_angle.from_rotation_matrix(x)[0], [x_init]) self.assert_jacobian_is_finite_fn( lambda x: axis_angle.from_rotation_matrix(x)[1], [x_init]) def test_from_rotation_matrix_normalized_preset(self): """Tests that from_rotation_matrix returns normalized axis-angles.""" preset_euler_angles = test_helpers.generate_preset_test_euler_angles() matrix = rotation_matrix_3d.from_euler(preset_euler_angles) axis, angle = axis_angle.from_rotation_matrix(matrix) self.assertAllEqual( axis_angle.is_normalized(axis, angle), np.ones(angle.shape, dtype=bool)) def test_from_rotation_matrix_normalized_random(self): """Tests that from_rotation_matrix returns normalized axis-angles.""" random_euler_angles = test_helpers.generate_random_test_euler_angles() matrix = rotation_matrix_3d.from_euler(random_euler_angles) axis, angle = axis_angle.from_rotation_matrix(matrix) self.assertAllEqual( axis_angle.is_normalized(axis, angle), np.ones(angle.shape, dtype=bool)) def test_from_rotation_matrix_random(self): """Tests rotation around Z axis.""" def get_rotation_matrix_around_z(angle_rad): return np.array([ [np.cos(angle_rad), -np.sin(angle_rad), 0], [np.sin(angle_rad), np.cos(angle_rad), 0], [0, 0, 1], ]) tensor_size = np.random.randint(10) angle = ( np.array([ np.deg2rad(np.random.randint(720) - 360) for _ in range(tensor_size) ]).reshape((tensor_size, 1))) rotation_matrix = [get_rotation_matrix_around_z(i[0]) for i in angle] rotation_matrix = np.array(rotation_matrix).reshape((tensor_size, 3, 3)) tf_axis, tf_angle = axis_angle.from_rotation_matrix(rotation_matrix) axis = np.tile([[0., 0., 1.]], (angle.shape[0], 1)) tf_quat_gt = quaternion.from_axis_angle(axis, angle) tf_quat = quaternion.from_axis_angle(tf_axis, tf_angle) # Compare quaternions since axis orientation and angle ambiguity will # lead to more complex comparisons. for quat_gt, quat in zip(self.evaluate(tf_quat_gt), self.evaluate(tf_quat)): # Remember that q=-q for any quaternion. pos = np.allclose(quat_gt, quat) neg = np.allclose(quat_gt, -quat) self.assertTrue(pos or neg) @parameterized.parameters( ((3,), (1,)), ((None, 3), (None, 1)), ((2, 3), (2, 1)), ((1, 3), (1,)), ((3,), (1, 1)), ) def test_inverse_exception_not_raised(self, *shape): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(axis_angle.inverse, shape) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (None,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (None,)), ) def test_inverse_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised(axis_angle.inverse, error_msg, shape) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_inverse_jacobian_preset(self): """Test the Jacobian of the inverse function.""" x_axis_init, x_angle_init = test_helpers.generate_preset_test_axis_angle() if tf.executing_eagerly(): # Because axis is returned as is, gradient calculation fails in graph mode # but not in eager mode. This is a side effect of having a graph rather # than a problem of the function. 
with self.subTest("axis"): self.assert_jacobian_is_correct_fn( lambda x: axis_angle.inverse(x, x_angle_init)[0], [x_axis_init]) with self.subTest("angle"): self.assert_jacobian_is_correct_fn( lambda x: axis_angle.inverse(x_axis_init, x)[1], [x_angle_init]) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_inverse_jacobian_random(self): """Test the Jacobian of the inverse function.""" x_axis_init, x_angle_init = test_helpers.generate_random_test_axis_angle() if tf.executing_eagerly(): # Because axis is returned as is, gradient calculation fails in graph mode # but not in eager mode. This is a side effect of having a graph rather # than a problem of the function. with self.subTest("axis"): self.assert_jacobian_is_correct_fn( lambda x: axis_angle.inverse(1.0 * x, x_angle_init)[0], [x_axis_init]) with self.subTest("angle"): self.assert_jacobian_is_correct_fn( lambda x: axis_angle.inverse(x_axis_init, x)[1], [x_angle_init]) def test_inverse_normalized_random(self): """Tests that axis-angle inversion return a normalized axis-angle.""" random_axis, random_angle = test_helpers.generate_random_test_axis_angle() inverse_axis, inverse_angle = axis_angle.inverse(random_axis, random_angle) self.assertAllEqual( axis_angle.is_normalized(inverse_axis, inverse_angle), np.ones(random_angle.shape)) def test_inverse_random(self): """Tests axis-angle inversion.""" random_axis, random_angle = test_helpers.generate_random_test_axis_angle() inverse_axis, inverse_angle = axis_angle.inverse(random_axis, random_angle) self.assertAllClose(inverse_axis, random_axis, rtol=1e-3) self.assertAllClose(inverse_angle, -random_angle, rtol=1e-3) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (None,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (None,)), ) def test_is_normalized_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised(axis_angle.is_normalized, error_msg, shape) def test_is_normalized_random(self): """Tests that is_normalized works as intended.""" # Samples normalized axis-angles. 
random_euler_angles = test_helpers.generate_random_test_euler_angles() with self.subTest(name=("is_normalized")): random_axis, random_angle = axis_angle.from_euler(random_euler_angles) pred = axis_angle.is_normalized(random_axis, random_angle) self.assertAllEqual(np.ones(shape=random_angle.shape, dtype=bool), pred) with self.subTest(name=("is_not_normalized")): random_axis *= 1.01 pred = axis_angle.is_normalized(random_axis, random_angle) self.assertAllEqual(np.zeros(shape=random_angle.shape, dtype=bool), pred) @parameterized.parameters( ((3,), (3,), (1,)), ((None, 3), (None, 3), (None, 1)), ((2, 3), (2, 3), (2, 1)), ((3,), (1, 3), (1, 2, 1)), ((1, 2, 3), (1, 3), (1,)), ((3,), (1, 3), (1,)), ) def test_rotate_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(axis_angle.rotate, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (2,), (3,), (1,)), ("must have exactly 3 dimensions in axis -1", (3,), (2,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (3,), (2,)), ) def test_rotate_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised(axis_angle.rotate, error_msg, shape) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_rotate_jacobian_preset(self): """Test the Jacobian of the rotate function.""" x_axis_init, x_angle_init = test_helpers.generate_preset_test_axis_angle() x_point_init = np.random.uniform(size=x_axis_init.shape) self.assert_jacobian_is_correct_fn( axis_angle.rotate, [x_point_init, x_axis_init, x_angle_init]) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_rotate_jacobian_random(self): """Test the Jacobian of the rotate function.""" x_axis_init, x_angle_init = test_helpers.generate_random_test_axis_angle() x_point_init = np.random.uniform(size=x_axis_init.shape) self.assert_jacobian_is_correct_fn( axis_angle.rotate, [x_point_init, x_axis_init, x_angle_init]) def test_rotate_random(self): """Tests that the rotate provide the same results as quaternion.rotate.""" random_axis, random_angle = test_helpers.generate_random_test_axis_angle() tensor_shape = random_angle.shape[:-1] random_point = np.random.normal(size=tensor_shape + (3,)) random_quaternion = quaternion.from_axis_angle(random_axis, random_angle) ground_truth = quaternion.rotate(random_point, random_quaternion) prediction = axis_angle.rotate(random_point, random_axis, random_angle) self.assertAllClose(ground_truth, prediction, rtol=1e-6) if __name__ == "__main__": test_case.main()
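test_rotate_random in the file above checks axis_angle.rotate against quaternion.rotate. A standard closed form for the axis-angle side is Rodrigues' rotation formula, v' = v cos(theta) + (k x v) sin(theta) + k (k . v)(1 - cos(theta)) for a unit axis k; whether the library uses exactly this form is not shown in this record, so the sketch below is an illustration under that assumption.

import numpy as np

def rotate(point, axis, angle):
    # Rodrigues' rotation formula; `axis` is assumed to be unit length.
    point = np.asarray(point, dtype=np.float64)
    axis = np.asarray(axis, dtype=np.float64)
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    return (point * cos_a
            + np.cross(axis, point) * sin_a
            + axis * np.dot(axis, point) * (1.0 - cos_a))

# Rotating x by 90 degrees around z yields y.
print(rotate([1.0, 0.0, 0.0], [0.0, 0.0, 1.0], np.pi / 2.0))  # ~[0, 1, 0]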
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for axis-angle.""" from absl.testing import flagsaver from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.geometry.transformation import axis_angle from tensorflow_graphics.geometry.transformation import quaternion from tensorflow_graphics.geometry.transformation import rotation_matrix_3d from tensorflow_graphics.geometry.transformation.tests import test_helpers from tensorflow_graphics.util import test_case class AxisAngleTest(test_case.TestCase): @parameterized.parameters( ((3,),), ((None, 3),), ) def test_from_euler_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(axis_angle.from_euler, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (None,)),) def test_from_euler_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(axis_angle.from_euler, error_msg, shapes) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_from_euler_jacobian_random(self): """Test the Jacobian of the from_euler function. Note: Preset angles are not tested as the gradient of tf.norm is NaN at 0. 
""" x_init = test_helpers.generate_random_test_euler_angles() self.assert_jacobian_is_finite_fn(lambda x: axis_angle.from_euler(x)[0], [x_init]) self.assert_jacobian_is_finite_fn(lambda x: axis_angle.from_euler(x)[1], [x_init]) def test_from_euler_random(self): """Tests that from_euler allows to perform the expect rotation of points.""" random_euler_angles = test_helpers.generate_random_test_euler_angles() tensor_shape = random_euler_angles.shape[:-1] random_point = np.random.normal(size=tensor_shape + (3,)) random_matrix = rotation_matrix_3d.from_euler(random_euler_angles) random_axis, random_angle = axis_angle.from_euler(random_euler_angles) rotated_with_matrix = rotation_matrix_3d.rotate(random_point, random_matrix) rotated_with_axis_angle = axis_angle.rotate(random_point, random_axis, random_angle) self.assertAllClose(rotated_with_matrix, rotated_with_axis_angle) @parameterized.parameters( ((3,),), ((None, 3),), ((2, 3),), ) def test_from_euler_with_small_angles_approximation_exception_not_raised( self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised( axis_angle.from_euler_with_small_angles_approximation, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (None,)),) def test_from_euler_with_small_angles_approximation_exception_raised( self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised( axis_angle.from_euler_with_small_angles_approximation, error_msg, shapes) def test_from_euler_normalized_preset(self): """Tests that from_euler allows build normalized axis-angles.""" euler_angles = test_helpers.generate_preset_test_euler_angles() axis, angle = axis_angle.from_euler(euler_angles) self.assertAllEqual( axis_angle.is_normalized(axis, angle), np.ones(angle.shape, dtype=bool)) def test_from_euler_normalized_random(self): """Tests that from_euler allows build normalized axis-angles.""" random_euler_angles = test_helpers.generate_random_test_euler_angles() random_axis, random_angle = axis_angle.from_euler(random_euler_angles) self.assertAllEqual( axis_angle.is_normalized(random_axis, random_angle), np.ones(shape=random_angle.shape)) def test_from_euler_with_small_angles_approximation_random(self): # Only generate small angles. For a test tolerance of 1e-3, 0.23 was found # empirically to be the range where the small angle approximation works. random_euler_angles = test_helpers.generate_random_test_euler_angles( min_angle=-0.23, max_angle=0.23) exact_axis_angle = axis_angle.from_euler(random_euler_angles) approximate_axis_angle = ( axis_angle.from_euler_with_small_angles_approximation( random_euler_angles)) self.assertAllClose(exact_axis_angle, approximate_axis_angle, atol=1e-3) @parameterized.parameters( ((4,),), ((None, 4),), ((2, 4),), ) def test_from_quaternion_exception_not_raised(self, *shape): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(axis_angle.from_quaternion, shape) @parameterized.parameters( ("must have exactly 4 dimensions in axis -1", (None,)),) def test_from_quaternion_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised(axis_angle.from_quaternion, error_msg, shape) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_from_quaternion_jacobian_random(self): """Test the Jacobian of the from_quaternion function. Note: Preset angles are not tested as the gradient of tf.norm is NaN a 0. 
""" x_init = test_helpers.generate_random_test_quaternions() self.assert_jacobian_is_finite_fn( lambda x: axis_angle.from_quaternion(x)[0], [x_init]) self.assert_jacobian_is_finite_fn( lambda x: axis_angle.from_quaternion(x)[1], [x_init]) def test_from_quaternion_normalized_preset(self): """Tests that from_quaternion returns normalized axis-angles.""" euler_angles = test_helpers.generate_preset_test_euler_angles() quat = quaternion.from_euler(euler_angles) axis, angle = axis_angle.from_quaternion(quat) self.assertAllEqual( axis_angle.is_normalized(axis, angle), np.ones(angle.shape, dtype=bool)) def test_from_quaternion_normalized_random(self): """Tests that from_quaternion returns normalized axis-angles.""" random_quaternions = test_helpers.generate_random_test_quaternions() random_axis, random_angle = axis_angle.from_quaternion(random_quaternions) self.assertAllEqual( axis_angle.is_normalized(random_axis, random_angle), np.ones(random_angle.shape)) def test_from_quaternion_preset(self): """Tests that axis_angle.from_quaternion produces the expected result.""" preset_euler_angles = test_helpers.generate_preset_test_euler_angles() preset_quaternions = quaternion.from_euler(preset_euler_angles) preset_axis_angle = axis_angle.from_euler(preset_euler_angles) self.assertAllClose( preset_axis_angle, axis_angle.from_quaternion(preset_quaternions), rtol=1e-3) def test_from_quaternion_random(self): """Tests that axis_angle.from_quaternion produces the expected result.""" random_euler_angles = test_helpers.generate_random_test_euler_angles() random_quaternions = quaternion.from_euler(random_euler_angles) random_axis_angle = axis_angle.from_euler(random_euler_angles) self.assertAllClose( random_axis_angle, axis_angle.from_quaternion(random_quaternions), rtol=1e-3) @parameterized.parameters( ((3, 3),), ((None, 3, 3),), ) def test_from_rotation_matrix_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(axis_angle.from_rotation_matrix, shapes) @parameterized.parameters( ("must have a rank greater than 1", (3,)), ("must have exactly 3 dimensions in axis -1", (3, None)), ("must have exactly 3 dimensions in axis -2", (None, 3)), ) def test_from_rotation_matrix_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised(axis_angle.from_rotation_matrix, error_msg, shape) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_from_rotation_matrix_jacobian_random(self): """Test the Jacobian of the from_rotation_matrix function. Note: Preset angles are not tested as the gradient of tf.norm is NaN a 0. 
""" x_init = test_helpers.generate_random_test_rotation_matrix_3d() self.assert_jacobian_is_finite_fn( lambda x: axis_angle.from_rotation_matrix(x)[0], [x_init]) self.assert_jacobian_is_finite_fn( lambda x: axis_angle.from_rotation_matrix(x)[1], [x_init]) def test_from_rotation_matrix_normalized_preset(self): """Tests that from_rotation_matrix returns normalized axis-angles.""" preset_euler_angles = test_helpers.generate_preset_test_euler_angles() matrix = rotation_matrix_3d.from_euler(preset_euler_angles) axis, angle = axis_angle.from_rotation_matrix(matrix) self.assertAllEqual( axis_angle.is_normalized(axis, angle), np.ones(angle.shape, dtype=bool)) def test_from_rotation_matrix_normalized_random(self): """Tests that from_rotation_matrix returns normalized axis-angles.""" random_euler_angles = test_helpers.generate_random_test_euler_angles() matrix = rotation_matrix_3d.from_euler(random_euler_angles) axis, angle = axis_angle.from_rotation_matrix(matrix) self.assertAllEqual( axis_angle.is_normalized(axis, angle), np.ones(angle.shape, dtype=bool)) def test_from_rotation_matrix_random(self): """Tests rotation around Z axis.""" def get_rotation_matrix_around_z(angle_rad): return np.array([ [np.cos(angle_rad), -np.sin(angle_rad), 0], [np.sin(angle_rad), np.cos(angle_rad), 0], [0, 0, 1], ]) tensor_size = np.random.randint(10) angle = ( np.array([ np.deg2rad(np.random.randint(720) - 360) for _ in range(tensor_size) ]).reshape((tensor_size, 1))) rotation_matrix = [get_rotation_matrix_around_z(i[0]) for i in angle] rotation_matrix = np.array(rotation_matrix).reshape((tensor_size, 3, 3)) tf_axis, tf_angle = axis_angle.from_rotation_matrix(rotation_matrix) axis = np.tile([[0., 0., 1.]], (angle.shape[0], 1)) tf_quat_gt = quaternion.from_axis_angle(axis, angle) tf_quat = quaternion.from_axis_angle(tf_axis, tf_angle) # Compare quaternions since axis orientation and angle ambiguity will # lead to more complex comparisons. for quat_gt, quat in zip(self.evaluate(tf_quat_gt), self.evaluate(tf_quat)): # Remember that q=-q for any quaternion. pos = np.allclose(quat_gt, quat) neg = np.allclose(quat_gt, -quat) self.assertTrue(pos or neg) @parameterized.parameters( ((3,), (1,)), ((None, 3), (None, 1)), ((2, 3), (2, 1)), ((1, 3), (1,)), ((3,), (1, 1)), ) def test_inverse_exception_not_raised(self, *shape): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(axis_angle.inverse, shape) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (None,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (None,)), ) def test_inverse_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised(axis_angle.inverse, error_msg, shape) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_inverse_jacobian_preset(self): """Test the Jacobian of the inverse function.""" x_axis_init, x_angle_init = test_helpers.generate_preset_test_axis_angle() if tf.executing_eagerly(): # Because axis is returned as is, gradient calculation fails in graph mode # but not in eager mode. This is a side effect of having a graph rather # than a problem of the function. 
with self.subTest("axis"): self.assert_jacobian_is_correct_fn( lambda x: axis_angle.inverse(x, x_angle_init)[0], [x_axis_init]) with self.subTest("angle"): self.assert_jacobian_is_correct_fn( lambda x: axis_angle.inverse(x_axis_init, x)[1], [x_angle_init]) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_inverse_jacobian_random(self): """Test the Jacobian of the inverse function.""" x_axis_init, x_angle_init = test_helpers.generate_random_test_axis_angle() if tf.executing_eagerly(): # Because axis is returned as is, gradient calculation fails in graph mode # but not in eager mode. This is a side effect of having a graph rather # than a problem of the function. with self.subTest("axis"): self.assert_jacobian_is_correct_fn( lambda x: axis_angle.inverse(1.0 * x, x_angle_init)[0], [x_axis_init]) with self.subTest("angle"): self.assert_jacobian_is_correct_fn( lambda x: axis_angle.inverse(x_axis_init, x)[1], [x_angle_init]) def test_inverse_normalized_random(self): """Tests that axis-angle inversion return a normalized axis-angle.""" random_axis, random_angle = test_helpers.generate_random_test_axis_angle() inverse_axis, inverse_angle = axis_angle.inverse(random_axis, random_angle) self.assertAllEqual( axis_angle.is_normalized(inverse_axis, inverse_angle), np.ones(random_angle.shape)) def test_inverse_random(self): """Tests axis-angle inversion.""" random_axis, random_angle = test_helpers.generate_random_test_axis_angle() inverse_axis, inverse_angle = axis_angle.inverse(random_axis, random_angle) self.assertAllClose(inverse_axis, random_axis, rtol=1e-3) self.assertAllClose(inverse_angle, -random_angle, rtol=1e-3) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (None,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (None,)), ) def test_is_normalized_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised(axis_angle.is_normalized, error_msg, shape) def test_is_normalized_random(self): """Tests that is_normalized works as intended.""" # Samples normalized axis-angles. 
random_euler_angles = test_helpers.generate_random_test_euler_angles() with self.subTest(name=("is_normalized")): random_axis, random_angle = axis_angle.from_euler(random_euler_angles) pred = axis_angle.is_normalized(random_axis, random_angle) self.assertAllEqual(np.ones(shape=random_angle.shape, dtype=bool), pred) with self.subTest(name=("is_not_normalized")): random_axis *= 1.01 pred = axis_angle.is_normalized(random_axis, random_angle) self.assertAllEqual(np.zeros(shape=random_angle.shape, dtype=bool), pred) @parameterized.parameters( ((3,), (3,), (1,)), ((None, 3), (None, 3), (None, 1)), ((2, 3), (2, 3), (2, 1)), ((3,), (1, 3), (1, 2, 1)), ((1, 2, 3), (1, 3), (1,)), ((3,), (1, 3), (1,)), ) def test_rotate_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(axis_angle.rotate, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (2,), (3,), (1,)), ("must have exactly 3 dimensions in axis -1", (3,), (2,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (3,), (2,)), ) def test_rotate_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised(axis_angle.rotate, error_msg, shape) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_rotate_jacobian_preset(self): """Test the Jacobian of the rotate function.""" x_axis_init, x_angle_init = test_helpers.generate_preset_test_axis_angle() x_point_init = np.random.uniform(size=x_axis_init.shape) self.assert_jacobian_is_correct_fn( axis_angle.rotate, [x_point_init, x_axis_init, x_angle_init]) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_rotate_jacobian_random(self): """Test the Jacobian of the rotate function.""" x_axis_init, x_angle_init = test_helpers.generate_random_test_axis_angle() x_point_init = np.random.uniform(size=x_axis_init.shape) self.assert_jacobian_is_correct_fn( axis_angle.rotate, [x_point_init, x_axis_init, x_angle_init]) def test_rotate_random(self): """Tests that the rotate provide the same results as quaternion.rotate.""" random_axis, random_angle = test_helpers.generate_random_test_axis_angle() tensor_shape = random_angle.shape[:-1] random_point = np.random.normal(size=tensor_shape + (3,)) random_quaternion = quaternion.from_axis_angle(random_axis, random_angle) ground_truth = quaternion.rotate(random_point, random_quaternion) prediction = axis_angle.rotate(random_point, random_axis, random_angle) self.assertAllClose(ground_truth, prediction, rtol=1e-6) if __name__ == "__main__": test_case.main()
-1
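test_from_rotation_matrix_random above compares quaternions rather than axis-angle pairs precisely because q and -q encode the same rotation (the unit quaternions double-cover the rotations), as its inline comment notes. A minimal NumPy sketch of that sign-insensitive comparison; the helper names are local illustrations, not tensorflow_graphics APIs.

import numpy as np

def quaternion_from_axis_angle(axis, angle):
    # (x, y, z, w) layout, matching the axis-angle-to-quaternion
    # construction used elsewhere in this dataset.
    axis = np.asarray(axis, dtype=np.float64)
    axis = axis / np.linalg.norm(axis)
    return np.append(axis * np.sin(0.5 * angle), np.cos(0.5 * angle))

def same_rotation(q1, q2, atol=1e-6):
    # q and -q represent the same rotation, so accept either sign.
    q1, q2 = np.asarray(q1), np.asarray(q2)
    return np.allclose(q1, q2, atol=atol) or np.allclose(q1, -q2, atol=atol)

q = quaternion_from_axis_angle([0.0, 0.0, 1.0], np.pi / 3.0)
assert same_rotation(q, q)
assert same_rotation(q, -q)   # opposite sign, identical rotation
assert not same_rotation(q, quaternion_from_axis_angle([1.0, 0.0, 0.0], np.pi / 3.0))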
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/geometry/transformation/tests/test_data.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module with test data for transformation tests.""" import numpy as np ANGLE_0 = np.array((0.,)) ANGLE_45 = np.array((np.pi / 4.,)) ANGLE_90 = np.array((np.pi / 2.,)) ANGLE_180 = np.array((np.pi,)) AXIS_2D_0 = np.array((0., 0.)) AXIS_2D_X = np.array((1., 0.)) AXIS_2D_Y = np.array((0., 1.)) def _rotation_2d_x(angle): """Creates a 2d rotation matrix. Args: angle: The angle. Returns: The 2d rotation matrix. """ angle = angle.item() return np.array(((np.cos(angle), -np.sin(angle)), (np.sin(angle), np.cos(angle)))) # pyformat: disable MAT_2D_ID = np.eye(2) MAT_2D_45 = _rotation_2d_x(ANGLE_45) MAT_2D_90 = _rotation_2d_x(ANGLE_90) MAT_2D_180 = _rotation_2d_x(ANGLE_180) AXIS_3D_0 = np.array((0., 0., 0.)) AXIS_3D_X = np.array((1., 0., 0.)) AXIS_3D_Y = np.array((0., 1., 0.)) AXIS_3D_Z = np.array((0., 0., 1.)) def _axis_angle_to_quaternion(axis, angle): """Converts an axis-angle representation to a quaternion. Args: axis: The axis of rotation. angle: The angle. Returns: The quaternion. """ quat = np.zeros(4) quat[0:3] = axis * np.sin(0.5 * angle) quat[3] = np.cos(0.5 * angle) return quat QUAT_ID = _axis_angle_to_quaternion(AXIS_3D_0, ANGLE_0) QUAT_X_45 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_45) QUAT_X_90 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_90) QUAT_X_180 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_180) QUAT_Y_45 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_45) QUAT_Y_90 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_90) QUAT_Y_180 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_180) QUAT_Z_45 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_45) QUAT_Z_90 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_90) QUAT_Z_180 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_180) def _rotation_3d_x(angle): """Creates a 3d rotation matrix around the x axis. Args: angle: The angle. Returns: The 3d rotation matrix. """ angle = angle.item() return np.array(((1., 0., 0.), (0., np.cos(angle), -np.sin(angle)), (0., np.sin(angle), np.cos(angle)))) # pyformat: disable def _rotation_3d_y(angle): """Creates a 3d rotation matrix around the y axis. Args: angle: The angle. Returns: The 3d rotation matrix. """ angle = angle.item() return np.array(((np.cos(angle), 0., np.sin(angle)), (0., 1., 0.), (-np.sin(angle), 0., np.cos(angle)))) # pyformat: disable def _rotation_3d_z(angle): """Creates a 3d rotation matrix around the z axis. Args: angle: The angle. Returns: The 3d rotation matrix. """ angle = angle.item() return np.array(((np.cos(angle), -np.sin(angle), 0.), (np.sin(angle), np.cos(angle), 0.), (0., 0., 1.))) # pyformat: disable MAT_3D_ID = np.eye(3) MAT_3D_X_45 = _rotation_3d_x(ANGLE_45) MAT_3D_X_90 = _rotation_3d_x(ANGLE_90) MAT_3D_X_180 = _rotation_3d_x(ANGLE_180) MAT_3D_Y_45 = _rotation_3d_y(ANGLE_45) MAT_3D_Y_90 = _rotation_3d_y(ANGLE_90) MAT_3D_Y_180 = _rotation_3d_y(ANGLE_180) MAT_3D_Z_45 = _rotation_3d_z(ANGLE_45) MAT_3D_Z_90 = _rotation_3d_z(ANGLE_90) MAT_3D_Z_180 = _rotation_3d_z(ANGLE_180)
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module with test data for transformation tests.""" import numpy as np ANGLE_0 = np.array((0.,)) ANGLE_45 = np.array((np.pi / 4.,)) ANGLE_90 = np.array((np.pi / 2.,)) ANGLE_180 = np.array((np.pi,)) AXIS_2D_0 = np.array((0., 0.)) AXIS_2D_X = np.array((1., 0.)) AXIS_2D_Y = np.array((0., 1.)) def _rotation_2d_x(angle): """Creates a 2d rotation matrix. Args: angle: The angle. Returns: The 2d rotation matrix. """ angle = angle.item() return np.array(((np.cos(angle), -np.sin(angle)), (np.sin(angle), np.cos(angle)))) # pyformat: disable MAT_2D_ID = np.eye(2) MAT_2D_45 = _rotation_2d_x(ANGLE_45) MAT_2D_90 = _rotation_2d_x(ANGLE_90) MAT_2D_180 = _rotation_2d_x(ANGLE_180) AXIS_3D_0 = np.array((0., 0., 0.)) AXIS_3D_X = np.array((1., 0., 0.)) AXIS_3D_Y = np.array((0., 1., 0.)) AXIS_3D_Z = np.array((0., 0., 1.)) def _axis_angle_to_quaternion(axis, angle): """Converts an axis-angle representation to a quaternion. Args: axis: The axis of rotation. angle: The angle. Returns: The quaternion. """ quat = np.zeros(4) quat[0:3] = axis * np.sin(0.5 * angle) quat[3] = np.cos(0.5 * angle) return quat QUAT_ID = _axis_angle_to_quaternion(AXIS_3D_0, ANGLE_0) QUAT_X_45 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_45) QUAT_X_90 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_90) QUAT_X_180 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_180) QUAT_Y_45 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_45) QUAT_Y_90 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_90) QUAT_Y_180 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_180) QUAT_Z_45 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_45) QUAT_Z_90 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_90) QUAT_Z_180 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_180) def _rotation_3d_x(angle): """Creates a 3d rotation matrix around the x axis. Args: angle: The angle. Returns: The 3d rotation matrix. """ angle = angle.item() return np.array(((1., 0., 0.), (0., np.cos(angle), -np.sin(angle)), (0., np.sin(angle), np.cos(angle)))) # pyformat: disable def _rotation_3d_y(angle): """Creates a 3d rotation matrix around the y axis. Args: angle: The angle. Returns: The 3d rotation matrix. """ angle = angle.item() return np.array(((np.cos(angle), 0., np.sin(angle)), (0., 1., 0.), (-np.sin(angle), 0., np.cos(angle)))) # pyformat: disable def _rotation_3d_z(angle): """Creates a 3d rotation matrix around the z axis. Args: angle: The angle. Returns: The 3d rotation matrix. """ angle = angle.item() return np.array(((np.cos(angle), -np.sin(angle), 0.), (np.sin(angle), np.cos(angle), 0.), (0., 0., 1.))) # pyformat: disable MAT_3D_ID = np.eye(3) MAT_3D_X_45 = _rotation_3d_x(ANGLE_45) MAT_3D_X_90 = _rotation_3d_x(ANGLE_90) MAT_3D_X_180 = _rotation_3d_x(ANGLE_180) MAT_3D_Y_45 = _rotation_3d_y(ANGLE_45) MAT_3D_Y_90 = _rotation_3d_y(ANGLE_90) MAT_3D_Y_180 = _rotation_3d_y(ANGLE_180) MAT_3D_Z_45 = _rotation_3d_z(ANGLE_45) MAT_3D_Z_90 = _rotation_3d_z(ANGLE_90) MAT_3D_Z_180 = _rotation_3d_z(ANGLE_180)
-1
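The preset construction in test_data.py above can be sanity-checked by hand: the 90-degree z-rotation matrix should map the x axis onto the y axis, and the matching quaternion, built exactly as _axis_angle_to_quaternion builds it, should come out as (0, 0, sin(pi/4), cos(pi/4)). A short self-contained NumPy check:

import numpy as np

angle = np.pi / 2.0
mat_z_90 = np.array(((np.cos(angle), -np.sin(angle), 0.),
                     (np.sin(angle), np.cos(angle), 0.),
                     (0., 0., 1.)))
x_axis = np.array((1., 0., 0.))
print(mat_z_90.dot(x_axis))   # ~[0, 1, 0]: x maps to y under a 90 degree z-rotation

quat = np.zeros(4)
quat[0:3] = np.array((0., 0., 1.)) * np.sin(0.5 * angle)
quat[3] = np.cos(0.5 * angle)
print(quat)                   # ~[0, 0, 0.7071, 0.7071]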
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/image/color_space/srgb.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements TensorFlow sRGB color space utility functions. More details about sRGB can be found on [this page](https://en.wikipedia.org/wiki/SRGB). """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import tensorflow as tf from tensorflow_graphics.image.color_space import constants from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape # Conversion constants following the naming convention from the 'theory of the # transformation' section at https://en.wikipedia.org/wiki/SRGB. _A = constants.srgb_gamma["A"] _PHI = constants.srgb_gamma["PHI"] _K0 = constants.srgb_gamma["K0"] _GAMMA = constants.srgb_gamma["GAMMA"] def from_linear_rgb(linear_rgb, name=None): """Converts linear RGB to sRGB colors. Note: In the following, A1 to An are optional batch dimensions. Args: linear_rgb: A Tensor of shape `[A_1, ..., A_n, 3]`, where the last dimension represents RGB values in the range [0, 1] in linear color space. name: A name for this op that defaults to "srgb_from_linear_rgb". Raises: ValueError: If `linear_rgb` has rank < 1 or has its last dimension not equal to 3. Returns: A tensor of shape `[A_1, ..., A_n, 3]`, where the last dimension represents sRGB values. """ with tf.compat.v1.name_scope(name, "srgb_from_linear_rgb", [linear_rgb]): linear_rgb = tf.convert_to_tensor(value=linear_rgb) shape.check_static( tensor=linear_rgb, tensor_name="linear_rgb", has_rank_greater_than=0, has_dim_equals=(-1, 3)) linear_rgb = asserts.assert_all_in_range(linear_rgb, 0., 1.) # Adds a small eps to avoid nan gradients from the second branch of # tf.where. linear_rgb += sys.float_info.epsilon return tf.compat.v1.where(linear_rgb <= _K0 / _PHI, linear_rgb * _PHI, (1 + _A) * (linear_rgb**(1 / _GAMMA)) - _A) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements TensorFlow sRGB color space utility functions. More details about sRGB can be found on [this page](https://en.wikipedia.org/wiki/SRGB). """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import tensorflow as tf from tensorflow_graphics.image.color_space import constants from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape # Conversion constants following the naming convention from the 'theory of the # transformation' section at https://en.wikipedia.org/wiki/SRGB. _A = constants.srgb_gamma["A"] _PHI = constants.srgb_gamma["PHI"] _K0 = constants.srgb_gamma["K0"] _GAMMA = constants.srgb_gamma["GAMMA"] def from_linear_rgb(linear_rgb, name=None): """Converts linear RGB to sRGB colors. Note: In the following, A1 to An are optional batch dimensions. Args: linear_rgb: A Tensor of shape `[A_1, ..., A_n, 3]`, where the last dimension represents RGB values in the range [0, 1] in linear color space. name: A name for this op that defaults to "srgb_from_linear_rgb". Raises: ValueError: If `linear_rgb` has rank < 1 or has its last dimension not equal to 3. Returns: A tensor of shape `[A_1, ..., A_n, 3]`, where the last dimension represents sRGB values. """ with tf.compat.v1.name_scope(name, "srgb_from_linear_rgb", [linear_rgb]): linear_rgb = tf.convert_to_tensor(value=linear_rgb) shape.check_static( tensor=linear_rgb, tensor_name="linear_rgb", has_rank_greater_than=0, has_dim_equals=(-1, 3)) linear_rgb = asserts.assert_all_in_range(linear_rgb, 0., 1.) # Adds a small eps to avoid nan gradients from the second branch of # tf.where. linear_rgb += sys.float_info.epsilon return tf.compat.v1.where(linear_rgb <= _K0 / _PHI, linear_rgb * _PHI, (1 + _A) * (linear_rgb**(1 / _GAMMA)) - _A) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/rendering/opengl/tests/math_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for OpenGL math routines.""" import math from absl.testing import flagsaver from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.geometry.transformation import look_at from tensorflow_graphics.rendering.camera import perspective from tensorflow_graphics.rendering.opengl import math as glm from tensorflow_graphics.util import test_case class MathTest(test_case.TestCase): def test_model_to_eye_preset(self): """Tests that model_to_eye generates expected results.""" point = ((2.0, 3.0, 4.0), (3.0, 4.0, 5.0)) camera_position = ((0.0, 0.0, 0.0), (0.1, 0.2, 0.3)) look_at_point = ((0.0, 0.0, 1.0), (0.4, 0.5, 0.6)) up_vector = ((0.0, 1.0, 0.0), (0.7, 0.8, 0.9)) pred = glm.model_to_eye(point, camera_position, look_at_point, up_vector) gt = ((-2.0, 3.0, -4.0), (2.08616257e-07, 1.27279234, -6.58179379)) self.assertAllClose(pred, gt) @parameterized.parameters( ((3,), (3,), (3,), (3,)), ((None, 3), (None, 3), (None, 3), (None, 3)), ((100, 3), (3,), (3,), (3,)), ((None, 1, 3), (None, 2, 3), (None, 2, 3), (None, 2, 3)), ) def test_model_to_eye_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.model_to_eye, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (2,), (3,), (3,), (3,)), ("must have exactly 3 dimensions in axis -1", (3,), (2,), (3,), (3,)), ("must have exactly 3 dimensions in axis -1", (3,), (3,), (2,), (3,)), ("must have exactly 3 dimensions in axis -1", (3,), (3,), (3,), (2,)), ("Not all batch dimensions are identical", (3,), (2, 3), (3, 3), (3, 3)), ("Not all batch dimensions are broadcast-compatible", (2, 3), (3, 3), (3, 3), (3, 3)), ) def test_model_to_eye_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(glm.model_to_eye, error_msg, shapes) def test_model_to_eye_jacobian_preset(self): """Tests the Jacobian of model_to_eye.""" point_init = np.array(((2.0, 3.0, 4.0), (3.0, 4.0, 5.0))) camera_position_init = np.array(((0.0, 0.0, 0.0), (0.1, 0.2, 0.3))) look_at_init = np.array(((0.0, 0.0, 1.0), (0.4, 0.5, 0.6))) up_vector_init = np.array(((0.0, 1.0, 0.0), (0.7, 0.8, 0.9))) self.assert_jacobian_is_correct_fn( glm.model_to_eye, [point_init, camera_position_init, look_at_init, up_vector_init]) def test_model_to_eye_jacobian_random(self): """Tests the Jacobian of model_to_eye.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() point_init = np.random.uniform(size=tensor_shape + [3]) camera_position_init = np.random.uniform(size=tensor_shape + [3]) look_at_init = np.random.uniform(size=tensor_shape + [3]) up_vector_init = np.random.uniform(size=tensor_shape + [3]) self.assert_jacobian_is_correct_fn( glm.model_to_eye, [point_init, camera_position_init, look_at_init, up_vector_init]) def test_eye_to_clip_preset(self): 
"""Tests that eye_to_clip generates expected results.""" point = ((2.0, 3.0, 4.0), (3.0, 4.0, 5.0)) vertical_field_of_view = ((60.0 * math.pi / 180.0,), (50.0 * math.pi / 180.0,)) aspect_ratio = ((1.5,), (1.6,)) near_plane = ((1.0,), (2.0,)) far_plane = ((10.0,), (11.0,)) pred = glm.eye_to_clip(point, vertical_field_of_view, aspect_ratio, near_plane, far_plane) gt = ((2.30940104, 5.19615173, -7.11111116, -4.0), (4.02095032, 8.57802773, -12.11111069, -5.0)) self.assertAllClose(pred, gt) @parameterized.parameters( ((3,), (1,), (1,), (1,), (1,)), ((None, 3), (None, 1), (None, 1), (None, 1), (None, 1)), ((None, 5, 3), (None, 5, 1), (None, 5, 1), (None, 5, 1), (None, 5, 1)), ) def test_eye_to_clip_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.eye_to_clip, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (2,), (1,), (1,), (1,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (2,), (1,), (1,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (1,), (2,), (1,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (1,), (1,), (2,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (1,), (1,), (1,), (2,)), ("Not all batch dimensions are broadcast-compatible", (3, 3), (2, 1), (1,), (1,), (1,)), ) def test_eye_to_clip_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(glm.eye_to_clip, error_msg, shapes) def test_eye_to_clip_jacobian_preset(self): """Tests the Jacobian of eye_to_clip.""" point_init = np.array(((2.0, 3.0, 4.0), (3.0, 4.0, 5.0))) vertical_field_of_view_init = np.array( ((60.0 * math.pi / 180.0,), (50.0 * math.pi / 180.0,))) aspect_ratio_init = np.array(((1.5,), (1.6,))) near_init = np.array(((1.0,), (2.0,))) far_init = np.array(((10.0,), (11.0,))) self.assert_jacobian_is_correct_fn( glm.eye_to_clip, [ point_init, vertical_field_of_view_init, aspect_ratio_init, near_init, far_init ], atol=1e-5) def test_eye_to_clip_jacobian_random(self): """Tests the Jacobian of eye_to_clip.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() point_init = np.random.uniform(size=tensor_shape + [3]) eps = np.finfo(np.float64).eps vertical_field_of_view_init = np.random.uniform( eps, math.pi - eps, size=tensor_shape + [1]) aspect_ratio_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1]) near_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1]) far_init = near_init + np.random.uniform(eps, 10.0, size=tensor_shape + [1]) self.assert_jacobian_is_correct_fn( glm.eye_to_clip, [ point_init, vertical_field_of_view_init, aspect_ratio_init, near_init, far_init ], atol=1e-03) def test_clip_to_ndc_preset(self): """Tests that clip_to_ndc generates expected results.""" point = ((4.0, 8.0, 16.0, 2.0), (4.0, 8.0, 16.0, 1.0)) pred = glm.clip_to_ndc(point) gt = ((2.0, 4.0, 8.0), (4.0, 8.0, 16.0)) self.assertAllClose(pred, gt) @parameterized.parameters( ((4,)), ((None, 4),), ((None, 5, 4),), ) def test_clip_to_ndc_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.clip_to_ndc, shapes) def test_clip_to_ndc_exception_raised(self): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised( glm.clip_to_ndc, "must have exactly 4 dimensions in axis -1", ((2,),)) def test_clip_to_ndc_jacobian_preset(self): """Tests 
the Jacobian of clip_to_ndc.""" point_init = np.array(((4.0, 8.0, 16.0, 2.0), (4.0, 8.0, 16.0, 1.0))) self.assert_jacobian_is_correct_fn(glm.clip_to_ndc, [point_init]) def test_clip_to_ndc_jacobian_random(self): """Tests the Jacobian of clip_to_ndc.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() point_init = np.random.uniform(size=tensor_shape + [4]) self.assert_jacobian_is_correct_fn( glm.clip_to_ndc, [point_init], atol=1e-04) def test_ndc_to_screen_preset(self): """Tests that ndc_to_screen generates expected results.""" point = ((1.1, 2.2, 3.3), (5.1, 5.2, 5.3)) lower_left_corner = ((6.4, 4.8), (0.0, 0.0)) screen_dimensions = ((640.0, 480.0), (300.0, 400.0)) near = ((1.0,), (11.0,)) far = ((10.0,), (100.0,)) pred = glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near, far) gt = ((678.40002441, 772.79998779, 20.34999847), (915.0, 1240.0, 291.3500061)) self.assertAllClose(pred, gt) @parameterized.parameters( ((3,), (2,), (2,), (1,), (1,)), ((None, 3), (None, 2), (None, 2), (None, 1), (None, 1)), ((None, 5, 3), (None, 5, 2), (None, 5, 2), (None, 5, 1), (None, 5, 1)), ) def test_ndc_to_screen_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.ndc_to_screen, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (2,), (2,), (2,), (1,), (1,)), ("must have exactly 2 dimensions in axis -1", (3,), (1,), (2,), (1,), (1,)), ("must have exactly 2 dimensions in axis -1", (3,), (2,), (3,), (1,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (2,), (2,), (2,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (2,), (2,), (1,), (3,)), ("Not all batch dimensions are identical", (3,), (2, 2), (3, 2), (3, 1), (3, 1)), ("Not all batch dimensions are broadcast-compatible", (4, 3), (3, 2), (3, 2), (3, 1), (3, 1)), ) def test_ndc_to_screen_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(glm.ndc_to_screen, error_msg, shapes) def test_ndc_to_screen_exception_near_raised(self): """Tests that an exception is raised when `near` is not strictly positive.""" point = np.random.uniform(size=(3,)) lower_left_corner = np.random.uniform(size=(2,)) screen_dimensions = np.random.uniform(1.0, 2.0, size=(2,)) near = np.random.uniform(-1.0, 0.0, size=(1,)) far = np.random.uniform(1.0, 2.0, size=(1,)) with self.subTest("negative_near"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near, far)) with self.subTest("zero_near"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, np.array((0.0,)), far)) def test_ndc_to_screen_exception_far_raised(self): """Tests that an exception is raised if `far` is not greater than `near`.""" point = np.random.uniform(size=(3,)) lower_left_corner = np.random.uniform(size=(2,)) screen_dimensions = np.random.uniform(1.0, 2.0, size=(2,)) near = np.random.uniform(1.0, 10.0, size=(1,)) far = near + np.random.uniform(-1.0, 0.0, size=(1,)) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near, far)) def test_ndc_to_screen_exception_screen_dimensions_raised(self): """Tests that an exception is raised when `screen_dimensions` is not strictly positive.""" point = 
np.random.uniform(size=(3,)) lower_left_corner = np.random.uniform(size=(2,)) screen_dimensions = np.random.uniform(-1.0, 0.0, size=(2,)) near = np.random.uniform(1.0, 10.0, size=(1,)) far = near + np.random.uniform(0.1, 1.0, size=(1,)) with self.subTest("negative_screen_dimensions"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near, far)) with self.subTest("zero_screen_dimensions"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( glm.ndc_to_screen(point, lower_left_corner, np.array((0.0, 0.0)), near, far)) def test_ndc_to_screen_jacobian_preset(self): """Tests the Jacobian of ndc_to_screen.""" point_init = np.array(((1.1, 2.2, 3.3), (5.1, 5.2, 5.3))) lower_left_corner_init = np.array(((6.4, 4.8), (0.0, 0.0))) screen_dimensions_init = np.array(((640.0, 480.0), (300.0, 400.0))) near_init = np.array(((1.0,), (11.0,))) far_init = np.array(((10.0,), (100.0,))) self.assert_jacobian_is_correct_fn(glm.ndc_to_screen, [ point_init, lower_left_corner_init, screen_dimensions_init, near_init, far_init ]) def test_ndc_to_screen_jacobian_random(self): """Tests the Jacobian of ndc_to_screen.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() point_init = np.random.uniform(size=tensor_shape + [3]) lower_left_corner_init = np.random.uniform(size=tensor_shape + [2]) screen_dimensions_init = np.random.uniform( 1.0, 1000.0, size=tensor_shape + [2]) near_init = np.random.uniform(1.0, 10.0, size=tensor_shape + [1]) far_init = near_init + np.random.uniform(0.1, 1.0, size=(1,)) self.assert_jacobian_is_correct_fn(glm.ndc_to_screen, [ point_init, lower_left_corner_init, screen_dimensions_init, near_init, far_init ]) def test_model_to_screen_preset(self): """Tests that model_to_screen generates expected results.""" point_world_space = np.array(((3.1, 4.1, 5.1), (-1.1, 2.2, -3.1))) camera_position = np.array(((0.0, 0.0, 0.0), (0.4, -0.8, 0.1))) camera_up = np.array(((0.0, 1.0, 0.0), (0.0, 0.0, 1.0))) look_at_point = np.array(((0.0, 0.0, 1.0), (0.0, 1.0, 0.0))) vertical_field_of_view = np.array( ((60.0 * math.pi / 180.0,), (65 * math.pi / 180,))) lower_left_corner = np.array(((0.0, 0.0), (10.0, 20.0))) screen_dimensions = np.array(((501.0, 501.0), (400.0, 600.0))) near = np.array(((0.01,), (1.0,))) far = np.array(((4.0,), (3.0,))) # Build matrices. 
model_to_eye_matrix = look_at.right_handed(camera_position, look_at_point, camera_up) perspective_matrix = perspective.right_handed( vertical_field_of_view, screen_dimensions[..., 0:1] / screen_dimensions[..., 1:2], near, far) pred_screen, pred_w = glm.model_to_screen(point_world_space, model_to_eye_matrix, perspective_matrix, screen_dimensions, lower_left_corner) gt_screen = ((-13.23016357, 599.30444336, 4.00215721), (98.07017517, -95.40383911, 3.1234405)) gt_w = ((5.1,), (3.42247,)) self.assertAllClose(pred_screen, gt_screen, atol=1e-5, rtol=1e-5) self.assertAllClose(pred_w, gt_w) @parameterized.parameters( ((3,), (4, 4), (4, 4), (2,), (2,)), ((640, 480, 3), (4, 4), (4, 4), (2,), (2,)), ((None, 3), (None, 4, 4), (None, 4, 4), (None, 2), (None, 2)), ((3,), (None, 1, 4, 4), (None, 1, 4, 4), (None, 1, 2), (None, 1, 2)), ) def test_model_to_screen_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.model_to_screen, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (9.0, 12.0), (0.0, 0.0), (2,), (4, 4), (4, 4)), ("must have exactly 4 dimensions in axis -1", (9.0, 12.0), (0.0, 0.0), (3,), (4, 3), (4, 4)), ("must have exactly 4 dimensions in axis -2", (9.0, 12.0), (0.0, 0.0), (3,), (3, 4), (4, 4)), ("must have exactly 4 dimensions in axis -1", (9.0, 12.0), (0.0, 0.0), (3,), (4, 4), (4, 3)), ("must have exactly 4 dimensions in axis -2", (9.0, 12.0), (0.0, 0.0), (3,), (4, 4), (3, 4)), ("Not all batch dimensions are broadcast-compatible", (9.0, 12.0), (0.0, 0.0), (2, 3), (3, 4, 4), (3, 4, 4)), ) def test_model_to_screen_exception_raised(self, error_msg, screen_dimensions, lower_left_corner, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised( func=glm.model_to_screen, error_msg=error_msg, shapes=shapes, screen_dimensions=screen_dimensions, lower_left_corner=lower_left_corner) def test_model_to_screen_jacobian_preset(self): """Tests the Jacobian of model_to_screen.""" point_world_space_init = np.array(((3.1, 4.1, 5.1), (-1.1, 2.2, -3.1))) camera_position_init = np.array(((0.0, 0.0, 0.0), (0.4, -0.8, 0.1))) camera_up_init = np.array(((0.0, 1.0, 0.0), (0.0, 0.0, 1.0))) look_at_init = np.array(((0.0, 0.0, 1.0), (0.0, 1.0, 0.0))) vertical_field_of_view_init = np.array( ((60.0 * math.pi / 180.0,), (65 * math.pi / 180,))) lower_left_corner_init = np.array(((0.0, 0.0), (10.0, 20.0))) screen_dimensions_init = np.array(((501.0, 501.0), (400.0, 600.0))) near_init = np.array(((0.01,), (1.0,))) far_init = np.array(((4.0,), (3.0,))) # Build matrices. 
model_to_eye_matrix = look_at.right_handed(camera_position_init, look_at_init, camera_up_init) perspective_matrix = perspective.right_handed( vertical_field_of_view_init, screen_dimensions_init[..., 0:1] / screen_dimensions_init[..., 1:2], near_init, far_init) args = [ point_world_space_init, model_to_eye_matrix, perspective_matrix, screen_dimensions_init, lower_left_corner_init ] with self.subTest(name="jacobian_y_projection"): self.assert_jacobian_is_correct_fn( lambda *args: glm.model_to_screen(*args)[0], args, atol=1e-4) # TODO(julienvalentin): will be fixed before submission # with self.subTest(name="jacobian_w"): # self.assert_jacobian_is_correct_fn( # lambda *args: glm.model_to_screen(*args)[1], args) def test_model_to_screen_jacobian_random(self): """Tests the Jacobian of model_to_screen.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() point_world_space_init = np.random.uniform(size=tensor_shape + [3]) camera_position_init = np.random.uniform(size=tensor_shape + [3]) camera_up_init = np.random.uniform(size=tensor_shape + [3]) look_at_init = np.random.uniform(size=tensor_shape + [3]) vertical_field_of_view_init = np.random.uniform( 0.1, 1.0, size=tensor_shape + [1]) lower_left_corner_init = np.random.uniform(size=tensor_shape + [2]) screen_dimensions_init = np.random.uniform( 0.1, 1.0, size=tensor_shape + [2]) near_init = np.random.uniform(0.1, 1.0, size=tensor_shape + [1]) far_init = near_init + np.random.uniform(0.1, 1.0, size=tensor_shape + [1]) # Build matrices. model_to_eye_matrix = look_at.right_handed(camera_position_init, look_at_init, camera_up_init) perspective_matrix = perspective.right_handed( vertical_field_of_view_init, screen_dimensions_init[..., 0:1] / screen_dimensions_init[..., 1:2], near_init, far_init) args = [ point_world_space_init, model_to_eye_matrix, perspective_matrix, screen_dimensions_init, lower_left_corner_init ] with self.subTest(name="jacobian_y_projection"): self.assert_jacobian_is_correct_fn( lambda *args: glm.model_to_screen(*args)[0], args, atol=1e-4) # TODO(julienvalentin): will be fixed before submission # with self.subTest(name="jacobian_w"): # self.assert_jacobian_is_correct_fn( # lambda *args: glm.model_to_screen(*args)[1], args) def test_perspective_correct_interpolation_preset(self): """Tests that perspective_correct_interpolation generates expected results.""" camera_origin = np.array((0.0, 0.0, 0.0)) camera_up = np.array((0.0, 1.0, 0.0)) look_at_point = np.array((0.0, 0.0, 1.0)) fov = np.array((90.0 * np.math.pi / 180.0,)) bottom_left = np.array((0.0, 0.0)) image_size = np.array((501.0, 501.0)) near_plane = np.array((0.01,)) far_plane = np.array((10.0,)) batch_size = np.random.randint(1, 5) triangle_x_y = np.random.uniform(-10.0, 10.0, (batch_size, 3, 2)) triangle_z = np.random.uniform(2.0, 10.0, (batch_size, 3, 1)) triangles = np.concatenate((triangle_x_y, triangle_z), axis=-1) # Builds barycentric weights. barycentric_weights = np.random.uniform(size=(batch_size, 3)) barycentric_weights = barycentric_weights / np.sum( barycentric_weights, axis=-1, keepdims=True) # Barycentric interpolation of vertex positions. convex_combination = np.einsum("ba, bac -> bc", barycentric_weights, triangles) # Build matrices. model_to_eye_matrix = look_at.right_handed(camera_origin, look_at_point, camera_up) perspective_matrix = perspective.right_handed( fov, (image_size[0:1] / image_size[1:2]), near_plane, far_plane) # Computes where those points project in screen coordinates. 
pixel_position, _ = glm.model_to_screen(convex_combination, model_to_eye_matrix, perspective_matrix, image_size, bottom_left) # Builds attributes. num_pixels = pixel_position.shape[0] attribute_size = np.random.randint(10) attributes = np.random.uniform(size=(num_pixels, 3, attribute_size)) prediction = glm.perspective_correct_interpolation(triangles, attributes, pixel_position[..., 0:2], model_to_eye_matrix, perspective_matrix, image_size, bottom_left) groundtruth = np.einsum("ba, bac -> bc", barycentric_weights, attributes) self.assertAllClose(prediction, groundtruth) def test_perspective_correct_interpolation_jacobian_preset(self): """Tests the Jacobian of perspective_correct_interpolation.""" vertices_init = np.tile( ((-0.2857143, 0.2857143, 5.0), (0.2857143, 0.2857143, 0.5), (0.0, -0.2857143, 1.0)), (2, 1, 1)) attributes_init = np.tile( (((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))), (2, 1, 1)) pixel_position_init = np.array(((125.5, 375.5), (250.5, 250.5))) camera_position_init = np.tile((0.0, 0.0, 0.0), (2, 3, 1)) look_at_init = np.tile((0.0, 0.0, 1.0), (2, 3, 1)) up_vector_init = np.tile((0.0, 1.0, 0.0), (2, 3, 1)) vertical_field_of_view_init = np.tile((1.0471975511965976,), (2, 3, 1)) screen_dimensions_init = np.tile((501.0, 501.0), (2, 3, 1)) near_init = np.tile((0.01,), (2, 3, 1)) far_init = np.tile((10.0,), (2, 3, 1)) lower_left_corner_init = np.tile((0.0, 0.0), (2, 3, 1)) # Build matrices. model_to_eye_matrix_init = look_at.right_handed(camera_position_init, look_at_init, up_vector_init) perspective_matrix_init = perspective.right_handed( vertical_field_of_view_init, screen_dimensions_init[..., 0:1] / screen_dimensions_init[..., 1:2], near_init, far_init) self.assert_jacobian_is_correct_fn(glm.perspective_correct_interpolation, [ vertices_init, attributes_init, pixel_position_init, model_to_eye_matrix_init, perspective_matrix_init, screen_dimensions_init, lower_left_corner_init ]) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_perspective_correct_interpolation_jacobian_random(self): """Tests the Jacobian of perspective_correct_interpolation.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() vertices_init = np.random.uniform(size=tensor_shape + [3, 3]) num_attributes = np.random.randint(1, 10) attributes_init = np.random.uniform(size=tensor_shape + [3, num_attributes]) pixel_position_init = np.random.uniform(size=tensor_shape + [2]) camera_position_init = np.random.uniform(size=tensor_shape + [3, 3]) look_at_init = np.random.uniform(size=tensor_shape + [3, 3]) up_vector_init = np.random.uniform(size=tensor_shape + [3, 3]) vertical_field_of_view_init = np.random.uniform( 0.1, 1.0, size=tensor_shape + [3, 1]) screen_dimensions_init = np.random.uniform( 1.0, 10.0, size=tensor_shape + [3, 2]) near_init = np.random.uniform(1.0, 10.0, size=tensor_shape + [3, 1]) far_init = near_init + np.random.uniform( 0.1, 1.0, size=tensor_shape + [3, 1]) lower_left_corner_init = np.random.uniform(size=tensor_shape + [3, 2]) # Build matrices. 
model_to_eye_matrix_init = look_at.right_handed(camera_position_init, look_at_init, up_vector_init) perspective_matrix_init = perspective.right_handed( vertical_field_of_view_init, screen_dimensions_init[..., 0:1] / screen_dimensions_init[..., 1:2], near_init, far_init) self.assert_jacobian_is_correct_fn( glm.perspective_correct_interpolation, [ vertices_init, attributes_init, pixel_position_init, model_to_eye_matrix_init, perspective_matrix_init, screen_dimensions_init, lower_left_corner_init ], atol=1e-4) @parameterized.parameters( ((3, 3), (2,), (4, 4), (4, 4), (2,)), ((3, 3), (7, 2), (4, 4), (4, 4), (2,)), ((3, 3), (None, 2), (4, 4), (4, 4), (2,)), ((7, 3, 3), (2,), (4, 4), (4, 4), (2,)), ((None, 3, 3), (2,), (4, 4), (4, 4), (2,)), ) def test_perspective_correct_barycentrics_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.perspective_correct_barycentrics, shapes) @parameterized.parameters( ("must have exactly 2 dimensions in axis -1", (3, 3), (2,), (4, 4), (4, 4), (3,)), ("must have exactly 3 dimensions in axis -1", (3, 4), (2,), (4, 4), (4, 4), (3,)), ("must have exactly 3 dimensions in axis -2", (4, 3), (2,), (4, 4), (4, 4), (3,)), ) def test_perspective_correct_barycentrics_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(glm.perspective_correct_barycentrics, error_msg, shapes) def test_perspective_correct_barycentrics_preset(self): """Tests that perspective_correct_barycentrics generates expected results.""" camera_origin = np.array((0.0, 0.0, 0.0)) camera_up = np.array((0.0, 1.0, 0.0)) look_at_point = np.array((0.0, 0.0, 1.0)) fov = np.array((90.0 * np.math.pi / 180.0,)) bottom_left = np.array((0.0, 0.0)) image_size = np.array((501.0, 501.0)) near_plane = np.array((0.01,)) far_plane = np.array((10.0,)) batch_size = np.random.randint(1, 5) triangle_x_y = np.random.uniform(-10.0, 10.0, (batch_size, 3, 2)) triangle_z = np.random.uniform(2.0, 10.0, (batch_size, 3, 1)) triangles = np.concatenate((triangle_x_y, triangle_z), axis=-1) # Builds barycentric weights. barycentric_weights = np.random.uniform(size=(batch_size, 3)) barycentric_weights = barycentric_weights / np.sum( barycentric_weights, axis=-1, keepdims=True) # Barycentric interpolation of vertex positions. convex_combination = np.einsum("ba, bac -> bc", barycentric_weights, triangles) # Build matrices. model_to_eye_matrix = look_at.right_handed(camera_origin, look_at_point, camera_up) perspective_matrix = perspective.right_handed( fov, (image_size[0:1] / image_size[1:2]), near_plane, far_plane) # Computes where those points project in screen coordinates. 
pixel_position, _ = glm.model_to_screen(convex_combination, model_to_eye_matrix, perspective_matrix, image_size, bottom_left) prediction = glm.perspective_correct_barycentrics(triangles, pixel_position[..., 0:2], model_to_eye_matrix, perspective_matrix, image_size, bottom_left) self.assertAllClose(prediction, barycentric_weights) def test_perspective_correct_barycentrics_jacobian_random(self): """Tests the Jacobian of perspective_correct_barycentrics.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() vertices_init = np.random.uniform(size=tensor_shape + [3, 3]) pixel_position_init = np.random.uniform(size=tensor_shape + [2]) camera_position_init = np.random.uniform(size=tensor_shape + [3, 3]) look_at_init = np.random.uniform(size=tensor_shape + [3, 3]) up_vector_init = np.random.uniform(size=tensor_shape + [3, 3]) vertical_field_of_view_init = np.random.uniform( 0.1, 1.0, size=tensor_shape + [3, 1]) screen_dimensions_init = np.random.uniform( 1.0, 10.0, size=tensor_shape + [3, 2]) near_init = np.random.uniform(1.0, 10.0, size=tensor_shape + [3, 1]) far_init = near_init + np.random.uniform( 0.1, 1.0, size=tensor_shape + [3, 1]) lower_left_corner_init = np.random.uniform(size=tensor_shape + [3, 2]) # Build matrices. model_to_eye_matrix_init = look_at.right_handed(camera_position_init, look_at_init, up_vector_init) perspective_matrix_init = perspective.right_handed( vertical_field_of_view_init, screen_dimensions_init[..., 0:1] / screen_dimensions_init[..., 1:2], near_init, far_init) self.assert_jacobian_is_correct_fn( glm.perspective_correct_barycentrics, [ vertices_init, pixel_position_init, model_to_eye_matrix_init, perspective_matrix_init, screen_dimensions_init, lower_left_corner_init ], atol=1e-4) @parameterized.parameters( ((3, 7), (3,)), ((2, 3, 7), (2, 3)), ((None, 3, 7), (None, 3)), ) def test_interpolate_attributes_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.interpolate_attributes, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -2", (2, 7), (3,)), ("must have exactly 3 dimensions in axis -1", (3, 7), (2,)), ("Not all batch dimensions are broadcast-compatible", (5, 3, 7), (4, 3)), ) def test_interpolate_attributes_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(glm.interpolate_attributes, error_msg, shapes) def test_interpolate_attributes_random(self): """Checks the output of interpolate_attributes.""" attributes = np.random.uniform(-1.0, 1.0, size=(3,)) barycentric = np.random.uniform(0.0, 1.0, size=(3,)) barycentric = barycentric / np.linalg.norm( barycentric, axis=-1, ord=1, keepdims=True) groundtruth = np.sum(attributes * barycentric, keepdims=True) attributes = np.reshape(attributes, (3, 1)) prediction = glm.interpolate_attributes(attributes, barycentric) self.assertAllClose(groundtruth, prediction) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_interpolate_attributes_jacobian_random(self): """Tests the jacobian of interpolate_attributes.""" batch_size = np.random.randint(1, 5) attributes = np.random.uniform(-1.0, 1.0, size=(batch_size, 3, 1)) barycentric = np.random.uniform( 0.0, 1.0, size=( batch_size, 3, )) barycentric = barycentric / np.linalg.norm( barycentric, axis=-1, ord=1, keepdims=True) self.assert_jacobian_is_correct_fn(glm.interpolate_attributes, [attributes, barycentric]) if __name__ == 
"__main__": test_case.main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for OpenGL math routines.""" import math from absl.testing import flagsaver from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.geometry.transformation import look_at from tensorflow_graphics.rendering.camera import perspective from tensorflow_graphics.rendering.opengl import math as glm from tensorflow_graphics.util import test_case class MathTest(test_case.TestCase): def test_model_to_eye_preset(self): """Tests that model_to_eye generates expected results.""" point = ((2.0, 3.0, 4.0), (3.0, 4.0, 5.0)) camera_position = ((0.0, 0.0, 0.0), (0.1, 0.2, 0.3)) look_at_point = ((0.0, 0.0, 1.0), (0.4, 0.5, 0.6)) up_vector = ((0.0, 1.0, 0.0), (0.7, 0.8, 0.9)) pred = glm.model_to_eye(point, camera_position, look_at_point, up_vector) gt = ((-2.0, 3.0, -4.0), (2.08616257e-07, 1.27279234, -6.58179379)) self.assertAllClose(pred, gt) @parameterized.parameters( ((3,), (3,), (3,), (3,)), ((None, 3), (None, 3), (None, 3), (None, 3)), ((100, 3), (3,), (3,), (3,)), ((None, 1, 3), (None, 2, 3), (None, 2, 3), (None, 2, 3)), ) def test_model_to_eye_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.model_to_eye, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (2,), (3,), (3,), (3,)), ("must have exactly 3 dimensions in axis -1", (3,), (2,), (3,), (3,)), ("must have exactly 3 dimensions in axis -1", (3,), (3,), (2,), (3,)), ("must have exactly 3 dimensions in axis -1", (3,), (3,), (3,), (2,)), ("Not all batch dimensions are identical", (3,), (2, 3), (3, 3), (3, 3)), ("Not all batch dimensions are broadcast-compatible", (2, 3), (3, 3), (3, 3), (3, 3)), ) def test_model_to_eye_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(glm.model_to_eye, error_msg, shapes) def test_model_to_eye_jacobian_preset(self): """Tests the Jacobian of model_to_eye.""" point_init = np.array(((2.0, 3.0, 4.0), (3.0, 4.0, 5.0))) camera_position_init = np.array(((0.0, 0.0, 0.0), (0.1, 0.2, 0.3))) look_at_init = np.array(((0.0, 0.0, 1.0), (0.4, 0.5, 0.6))) up_vector_init = np.array(((0.0, 1.0, 0.0), (0.7, 0.8, 0.9))) self.assert_jacobian_is_correct_fn( glm.model_to_eye, [point_init, camera_position_init, look_at_init, up_vector_init]) def test_model_to_eye_jacobian_random(self): """Tests the Jacobian of model_to_eye.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() point_init = np.random.uniform(size=tensor_shape + [3]) camera_position_init = np.random.uniform(size=tensor_shape + [3]) look_at_init = np.random.uniform(size=tensor_shape + [3]) up_vector_init = np.random.uniform(size=tensor_shape + [3]) self.assert_jacobian_is_correct_fn( glm.model_to_eye, [point_init, camera_position_init, look_at_init, up_vector_init]) def test_eye_to_clip_preset(self): 
"""Tests that eye_to_clip generates expected results.""" point = ((2.0, 3.0, 4.0), (3.0, 4.0, 5.0)) vertical_field_of_view = ((60.0 * math.pi / 180.0,), (50.0 * math.pi / 180.0,)) aspect_ratio = ((1.5,), (1.6,)) near_plane = ((1.0,), (2.0,)) far_plane = ((10.0,), (11.0,)) pred = glm.eye_to_clip(point, vertical_field_of_view, aspect_ratio, near_plane, far_plane) gt = ((2.30940104, 5.19615173, -7.11111116, -4.0), (4.02095032, 8.57802773, -12.11111069, -5.0)) self.assertAllClose(pred, gt) @parameterized.parameters( ((3,), (1,), (1,), (1,), (1,)), ((None, 3), (None, 1), (None, 1), (None, 1), (None, 1)), ((None, 5, 3), (None, 5, 1), (None, 5, 1), (None, 5, 1), (None, 5, 1)), ) def test_eye_to_clip_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.eye_to_clip, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (2,), (1,), (1,), (1,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (2,), (1,), (1,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (1,), (2,), (1,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (1,), (1,), (2,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (1,), (1,), (1,), (2,)), ("Not all batch dimensions are broadcast-compatible", (3, 3), (2, 1), (1,), (1,), (1,)), ) def test_eye_to_clip_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(glm.eye_to_clip, error_msg, shapes) def test_eye_to_clip_jacobian_preset(self): """Tests the Jacobian of eye_to_clip.""" point_init = np.array(((2.0, 3.0, 4.0), (3.0, 4.0, 5.0))) vertical_field_of_view_init = np.array( ((60.0 * math.pi / 180.0,), (50.0 * math.pi / 180.0,))) aspect_ratio_init = np.array(((1.5,), (1.6,))) near_init = np.array(((1.0,), (2.0,))) far_init = np.array(((10.0,), (11.0,))) self.assert_jacobian_is_correct_fn( glm.eye_to_clip, [ point_init, vertical_field_of_view_init, aspect_ratio_init, near_init, far_init ], atol=1e-5) def test_eye_to_clip_jacobian_random(self): """Tests the Jacobian of eye_to_clip.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() point_init = np.random.uniform(size=tensor_shape + [3]) eps = np.finfo(np.float64).eps vertical_field_of_view_init = np.random.uniform( eps, math.pi - eps, size=tensor_shape + [1]) aspect_ratio_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1]) near_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1]) far_init = near_init + np.random.uniform(eps, 10.0, size=tensor_shape + [1]) self.assert_jacobian_is_correct_fn( glm.eye_to_clip, [ point_init, vertical_field_of_view_init, aspect_ratio_init, near_init, far_init ], atol=1e-03) def test_clip_to_ndc_preset(self): """Tests that clip_to_ndc generates expected results.""" point = ((4.0, 8.0, 16.0, 2.0), (4.0, 8.0, 16.0, 1.0)) pred = glm.clip_to_ndc(point) gt = ((2.0, 4.0, 8.0), (4.0, 8.0, 16.0)) self.assertAllClose(pred, gt) @parameterized.parameters( ((4,)), ((None, 4),), ((None, 5, 4),), ) def test_clip_to_ndc_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.clip_to_ndc, shapes) def test_clip_to_ndc_exception_raised(self): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised( glm.clip_to_ndc, "must have exactly 4 dimensions in axis -1", ((2,),)) def test_clip_to_ndc_jacobian_preset(self): """Tests 
the Jacobian of clip_to_ndc.""" point_init = np.array(((4.0, 8.0, 16.0, 2.0), (4.0, 8.0, 16.0, 1.0))) self.assert_jacobian_is_correct_fn(glm.clip_to_ndc, [point_init]) def test_clip_to_ndc_jacobian_random(self): """Tests the Jacobian of clip_to_ndc.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() point_init = np.random.uniform(size=tensor_shape + [4]) self.assert_jacobian_is_correct_fn( glm.clip_to_ndc, [point_init], atol=1e-04) def test_ndc_to_screen_preset(self): """Tests that ndc_to_screen generates expected results.""" point = ((1.1, 2.2, 3.3), (5.1, 5.2, 5.3)) lower_left_corner = ((6.4, 4.8), (0.0, 0.0)) screen_dimensions = ((640.0, 480.0), (300.0, 400.0)) near = ((1.0,), (11.0,)) far = ((10.0,), (100.0,)) pred = glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near, far) gt = ((678.40002441, 772.79998779, 20.34999847), (915.0, 1240.0, 291.3500061)) self.assertAllClose(pred, gt) @parameterized.parameters( ((3,), (2,), (2,), (1,), (1,)), ((None, 3), (None, 2), (None, 2), (None, 1), (None, 1)), ((None, 5, 3), (None, 5, 2), (None, 5, 2), (None, 5, 1), (None, 5, 1)), ) def test_ndc_to_screen_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.ndc_to_screen, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (2,), (2,), (2,), (1,), (1,)), ("must have exactly 2 dimensions in axis -1", (3,), (1,), (2,), (1,), (1,)), ("must have exactly 2 dimensions in axis -1", (3,), (2,), (3,), (1,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (2,), (2,), (2,), (1,)), ("must have exactly 1 dimensions in axis -1", (3,), (2,), (2,), (1,), (3,)), ("Not all batch dimensions are identical", (3,), (2, 2), (3, 2), (3, 1), (3, 1)), ("Not all batch dimensions are broadcast-compatible", (4, 3), (3, 2), (3, 2), (3, 1), (3, 1)), ) def test_ndc_to_screen_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(glm.ndc_to_screen, error_msg, shapes) def test_ndc_to_screen_exception_near_raised(self): """Tests that an exception is raised when `near` is not strictly positive.""" point = np.random.uniform(size=(3,)) lower_left_corner = np.random.uniform(size=(2,)) screen_dimensions = np.random.uniform(1.0, 2.0, size=(2,)) near = np.random.uniform(-1.0, 0.0, size=(1,)) far = np.random.uniform(1.0, 2.0, size=(1,)) with self.subTest("negative_near"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near, far)) with self.subTest("zero_near"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, np.array((0.0,)), far)) def test_ndc_to_screen_exception_far_raised(self): """Tests that an exception is raised if `far` is not greater than `near`.""" point = np.random.uniform(size=(3,)) lower_left_corner = np.random.uniform(size=(2,)) screen_dimensions = np.random.uniform(1.0, 2.0, size=(2,)) near = np.random.uniform(1.0, 10.0, size=(1,)) far = near + np.random.uniform(-1.0, 0.0, size=(1,)) with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near, far)) def test_ndc_to_screen_exception_screen_dimensions_raised(self): """Tests that an exception is raised when `screen_dimensions` is not strictly positive.""" point = 
np.random.uniform(size=(3,)) lower_left_corner = np.random.uniform(size=(2,)) screen_dimensions = np.random.uniform(-1.0, 0.0, size=(2,)) near = np.random.uniform(1.0, 10.0, size=(1,)) far = near + np.random.uniform(0.1, 1.0, size=(1,)) with self.subTest("negative_screen_dimensions"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near, far)) with self.subTest("zero_screen_dimensions"): with self.assertRaises(tf.errors.InvalidArgumentError): self.evaluate( glm.ndc_to_screen(point, lower_left_corner, np.array((0.0, 0.0)), near, far)) def test_ndc_to_screen_jacobian_preset(self): """Tests the Jacobian of ndc_to_screen.""" point_init = np.array(((1.1, 2.2, 3.3), (5.1, 5.2, 5.3))) lower_left_corner_init = np.array(((6.4, 4.8), (0.0, 0.0))) screen_dimensions_init = np.array(((640.0, 480.0), (300.0, 400.0))) near_init = np.array(((1.0,), (11.0,))) far_init = np.array(((10.0,), (100.0,))) self.assert_jacobian_is_correct_fn(glm.ndc_to_screen, [ point_init, lower_left_corner_init, screen_dimensions_init, near_init, far_init ]) def test_ndc_to_screen_jacobian_random(self): """Tests the Jacobian of ndc_to_screen.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() point_init = np.random.uniform(size=tensor_shape + [3]) lower_left_corner_init = np.random.uniform(size=tensor_shape + [2]) screen_dimensions_init = np.random.uniform( 1.0, 1000.0, size=tensor_shape + [2]) near_init = np.random.uniform(1.0, 10.0, size=tensor_shape + [1]) far_init = near_init + np.random.uniform(0.1, 1.0, size=(1,)) self.assert_jacobian_is_correct_fn(glm.ndc_to_screen, [ point_init, lower_left_corner_init, screen_dimensions_init, near_init, far_init ]) def test_model_to_screen_preset(self): """Tests that model_to_screen generates expected results.""" point_world_space = np.array(((3.1, 4.1, 5.1), (-1.1, 2.2, -3.1))) camera_position = np.array(((0.0, 0.0, 0.0), (0.4, -0.8, 0.1))) camera_up = np.array(((0.0, 1.0, 0.0), (0.0, 0.0, 1.0))) look_at_point = np.array(((0.0, 0.0, 1.0), (0.0, 1.0, 0.0))) vertical_field_of_view = np.array( ((60.0 * math.pi / 180.0,), (65 * math.pi / 180,))) lower_left_corner = np.array(((0.0, 0.0), (10.0, 20.0))) screen_dimensions = np.array(((501.0, 501.0), (400.0, 600.0))) near = np.array(((0.01,), (1.0,))) far = np.array(((4.0,), (3.0,))) # Build matrices. 
model_to_eye_matrix = look_at.right_handed(camera_position, look_at_point, camera_up) perspective_matrix = perspective.right_handed( vertical_field_of_view, screen_dimensions[..., 0:1] / screen_dimensions[..., 1:2], near, far) pred_screen, pred_w = glm.model_to_screen(point_world_space, model_to_eye_matrix, perspective_matrix, screen_dimensions, lower_left_corner) gt_screen = ((-13.23016357, 599.30444336, 4.00215721), (98.07017517, -95.40383911, 3.1234405)) gt_w = ((5.1,), (3.42247,)) self.assertAllClose(pred_screen, gt_screen, atol=1e-5, rtol=1e-5) self.assertAllClose(pred_w, gt_w) @parameterized.parameters( ((3,), (4, 4), (4, 4), (2,), (2,)), ((640, 480, 3), (4, 4), (4, 4), (2,), (2,)), ((None, 3), (None, 4, 4), (None, 4, 4), (None, 2), (None, 2)), ((3,), (None, 1, 4, 4), (None, 1, 4, 4), (None, 1, 2), (None, 1, 2)), ) def test_model_to_screen_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.model_to_screen, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -1", (9.0, 12.0), (0.0, 0.0), (2,), (4, 4), (4, 4)), ("must have exactly 4 dimensions in axis -1", (9.0, 12.0), (0.0, 0.0), (3,), (4, 3), (4, 4)), ("must have exactly 4 dimensions in axis -2", (9.0, 12.0), (0.0, 0.0), (3,), (3, 4), (4, 4)), ("must have exactly 4 dimensions in axis -1", (9.0, 12.0), (0.0, 0.0), (3,), (4, 4), (4, 3)), ("must have exactly 4 dimensions in axis -2", (9.0, 12.0), (0.0, 0.0), (3,), (4, 4), (3, 4)), ("Not all batch dimensions are broadcast-compatible", (9.0, 12.0), (0.0, 0.0), (2, 3), (3, 4, 4), (3, 4, 4)), ) def test_model_to_screen_exception_raised(self, error_msg, screen_dimensions, lower_left_corner, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised( func=glm.model_to_screen, error_msg=error_msg, shapes=shapes, screen_dimensions=screen_dimensions, lower_left_corner=lower_left_corner) def test_model_to_screen_jacobian_preset(self): """Tests the Jacobian of model_to_screen.""" point_world_space_init = np.array(((3.1, 4.1, 5.1), (-1.1, 2.2, -3.1))) camera_position_init = np.array(((0.0, 0.0, 0.0), (0.4, -0.8, 0.1))) camera_up_init = np.array(((0.0, 1.0, 0.0), (0.0, 0.0, 1.0))) look_at_init = np.array(((0.0, 0.0, 1.0), (0.0, 1.0, 0.0))) vertical_field_of_view_init = np.array( ((60.0 * math.pi / 180.0,), (65 * math.pi / 180,))) lower_left_corner_init = np.array(((0.0, 0.0), (10.0, 20.0))) screen_dimensions_init = np.array(((501.0, 501.0), (400.0, 600.0))) near_init = np.array(((0.01,), (1.0,))) far_init = np.array(((4.0,), (3.0,))) # Build matrices. 
model_to_eye_matrix = look_at.right_handed(camera_position_init, look_at_init, camera_up_init) perspective_matrix = perspective.right_handed( vertical_field_of_view_init, screen_dimensions_init[..., 0:1] / screen_dimensions_init[..., 1:2], near_init, far_init) args = [ point_world_space_init, model_to_eye_matrix, perspective_matrix, screen_dimensions_init, lower_left_corner_init ] with self.subTest(name="jacobian_y_projection"): self.assert_jacobian_is_correct_fn( lambda *args: glm.model_to_screen(*args)[0], args, atol=1e-4) # TODO(julienvalentin): will be fixed before submission # with self.subTest(name="jacobian_w"): # self.assert_jacobian_is_correct_fn( # lambda *args: glm.model_to_screen(*args)[1], args) def test_model_to_screen_jacobian_random(self): """Tests the Jacobian of model_to_screen.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() point_world_space_init = np.random.uniform(size=tensor_shape + [3]) camera_position_init = np.random.uniform(size=tensor_shape + [3]) camera_up_init = np.random.uniform(size=tensor_shape + [3]) look_at_init = np.random.uniform(size=tensor_shape + [3]) vertical_field_of_view_init = np.random.uniform( 0.1, 1.0, size=tensor_shape + [1]) lower_left_corner_init = np.random.uniform(size=tensor_shape + [2]) screen_dimensions_init = np.random.uniform( 0.1, 1.0, size=tensor_shape + [2]) near_init = np.random.uniform(0.1, 1.0, size=tensor_shape + [1]) far_init = near_init + np.random.uniform(0.1, 1.0, size=tensor_shape + [1]) # Build matrices. model_to_eye_matrix = look_at.right_handed(camera_position_init, look_at_init, camera_up_init) perspective_matrix = perspective.right_handed( vertical_field_of_view_init, screen_dimensions_init[..., 0:1] / screen_dimensions_init[..., 1:2], near_init, far_init) args = [ point_world_space_init, model_to_eye_matrix, perspective_matrix, screen_dimensions_init, lower_left_corner_init ] with self.subTest(name="jacobian_y_projection"): self.assert_jacobian_is_correct_fn( lambda *args: glm.model_to_screen(*args)[0], args, atol=1e-4) # TODO(julienvalentin): will be fixed before submission # with self.subTest(name="jacobian_w"): # self.assert_jacobian_is_correct_fn( # lambda *args: glm.model_to_screen(*args)[1], args) def test_perspective_correct_interpolation_preset(self): """Tests that perspective_correct_interpolation generates expected results.""" camera_origin = np.array((0.0, 0.0, 0.0)) camera_up = np.array((0.0, 1.0, 0.0)) look_at_point = np.array((0.0, 0.0, 1.0)) fov = np.array((90.0 * np.math.pi / 180.0,)) bottom_left = np.array((0.0, 0.0)) image_size = np.array((501.0, 501.0)) near_plane = np.array((0.01,)) far_plane = np.array((10.0,)) batch_size = np.random.randint(1, 5) triangle_x_y = np.random.uniform(-10.0, 10.0, (batch_size, 3, 2)) triangle_z = np.random.uniform(2.0, 10.0, (batch_size, 3, 1)) triangles = np.concatenate((triangle_x_y, triangle_z), axis=-1) # Builds barycentric weights. barycentric_weights = np.random.uniform(size=(batch_size, 3)) barycentric_weights = barycentric_weights / np.sum( barycentric_weights, axis=-1, keepdims=True) # Barycentric interpolation of vertex positions. convex_combination = np.einsum("ba, bac -> bc", barycentric_weights, triangles) # Build matrices. model_to_eye_matrix = look_at.right_handed(camera_origin, look_at_point, camera_up) perspective_matrix = perspective.right_handed( fov, (image_size[0:1] / image_size[1:2]), near_plane, far_plane) # Computes where those points project in screen coordinates. 
pixel_position, _ = glm.model_to_screen(convex_combination, model_to_eye_matrix, perspective_matrix, image_size, bottom_left) # Builds attributes. num_pixels = pixel_position.shape[0] attribute_size = np.random.randint(10) attributes = np.random.uniform(size=(num_pixels, 3, attribute_size)) prediction = glm.perspective_correct_interpolation(triangles, attributes, pixel_position[..., 0:2], model_to_eye_matrix, perspective_matrix, image_size, bottom_left) groundtruth = np.einsum("ba, bac -> bc", barycentric_weights, attributes) self.assertAllClose(prediction, groundtruth) def test_perspective_correct_interpolation_jacobian_preset(self): """Tests the Jacobian of perspective_correct_interpolation.""" vertices_init = np.tile( ((-0.2857143, 0.2857143, 5.0), (0.2857143, 0.2857143, 0.5), (0.0, -0.2857143, 1.0)), (2, 1, 1)) attributes_init = np.tile( (((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))), (2, 1, 1)) pixel_position_init = np.array(((125.5, 375.5), (250.5, 250.5))) camera_position_init = np.tile((0.0, 0.0, 0.0), (2, 3, 1)) look_at_init = np.tile((0.0, 0.0, 1.0), (2, 3, 1)) up_vector_init = np.tile((0.0, 1.0, 0.0), (2, 3, 1)) vertical_field_of_view_init = np.tile((1.0471975511965976,), (2, 3, 1)) screen_dimensions_init = np.tile((501.0, 501.0), (2, 3, 1)) near_init = np.tile((0.01,), (2, 3, 1)) far_init = np.tile((10.0,), (2, 3, 1)) lower_left_corner_init = np.tile((0.0, 0.0), (2, 3, 1)) # Build matrices. model_to_eye_matrix_init = look_at.right_handed(camera_position_init, look_at_init, up_vector_init) perspective_matrix_init = perspective.right_handed( vertical_field_of_view_init, screen_dimensions_init[..., 0:1] / screen_dimensions_init[..., 1:2], near_init, far_init) self.assert_jacobian_is_correct_fn(glm.perspective_correct_interpolation, [ vertices_init, attributes_init, pixel_position_init, model_to_eye_matrix_init, perspective_matrix_init, screen_dimensions_init, lower_left_corner_init ]) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_perspective_correct_interpolation_jacobian_random(self): """Tests the Jacobian of perspective_correct_interpolation.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() vertices_init = np.random.uniform(size=tensor_shape + [3, 3]) num_attributes = np.random.randint(1, 10) attributes_init = np.random.uniform(size=tensor_shape + [3, num_attributes]) pixel_position_init = np.random.uniform(size=tensor_shape + [2]) camera_position_init = np.random.uniform(size=tensor_shape + [3, 3]) look_at_init = np.random.uniform(size=tensor_shape + [3, 3]) up_vector_init = np.random.uniform(size=tensor_shape + [3, 3]) vertical_field_of_view_init = np.random.uniform( 0.1, 1.0, size=tensor_shape + [3, 1]) screen_dimensions_init = np.random.uniform( 1.0, 10.0, size=tensor_shape + [3, 2]) near_init = np.random.uniform(1.0, 10.0, size=tensor_shape + [3, 1]) far_init = near_init + np.random.uniform( 0.1, 1.0, size=tensor_shape + [3, 1]) lower_left_corner_init = np.random.uniform(size=tensor_shape + [3, 2]) # Build matrices. 
model_to_eye_matrix_init = look_at.right_handed(camera_position_init, look_at_init, up_vector_init) perspective_matrix_init = perspective.right_handed( vertical_field_of_view_init, screen_dimensions_init[..., 0:1] / screen_dimensions_init[..., 1:2], near_init, far_init) self.assert_jacobian_is_correct_fn( glm.perspective_correct_interpolation, [ vertices_init, attributes_init, pixel_position_init, model_to_eye_matrix_init, perspective_matrix_init, screen_dimensions_init, lower_left_corner_init ], atol=1e-4) @parameterized.parameters( ((3, 3), (2,), (4, 4), (4, 4), (2,)), ((3, 3), (7, 2), (4, 4), (4, 4), (2,)), ((3, 3), (None, 2), (4, 4), (4, 4), (2,)), ((7, 3, 3), (2,), (4, 4), (4, 4), (2,)), ((None, 3, 3), (2,), (4, 4), (4, 4), (2,)), ) def test_perspective_correct_barycentrics_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.perspective_correct_barycentrics, shapes) @parameterized.parameters( ("must have exactly 2 dimensions in axis -1", (3, 3), (2,), (4, 4), (4, 4), (3,)), ("must have exactly 3 dimensions in axis -1", (3, 4), (2,), (4, 4), (4, 4), (3,)), ("must have exactly 3 dimensions in axis -2", (4, 3), (2,), (4, 4), (4, 4), (3,)), ) def test_perspective_correct_barycentrics_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(glm.perspective_correct_barycentrics, error_msg, shapes) def test_perspective_correct_barycentrics_preset(self): """Tests that perspective_correct_barycentrics generates expected results.""" camera_origin = np.array((0.0, 0.0, 0.0)) camera_up = np.array((0.0, 1.0, 0.0)) look_at_point = np.array((0.0, 0.0, 1.0)) fov = np.array((90.0 * np.math.pi / 180.0,)) bottom_left = np.array((0.0, 0.0)) image_size = np.array((501.0, 501.0)) near_plane = np.array((0.01,)) far_plane = np.array((10.0,)) batch_size = np.random.randint(1, 5) triangle_x_y = np.random.uniform(-10.0, 10.0, (batch_size, 3, 2)) triangle_z = np.random.uniform(2.0, 10.0, (batch_size, 3, 1)) triangles = np.concatenate((triangle_x_y, triangle_z), axis=-1) # Builds barycentric weights. barycentric_weights = np.random.uniform(size=(batch_size, 3)) barycentric_weights = barycentric_weights / np.sum( barycentric_weights, axis=-1, keepdims=True) # Barycentric interpolation of vertex positions. convex_combination = np.einsum("ba, bac -> bc", barycentric_weights, triangles) # Build matrices. model_to_eye_matrix = look_at.right_handed(camera_origin, look_at_point, camera_up) perspective_matrix = perspective.right_handed( fov, (image_size[0:1] / image_size[1:2]), near_plane, far_plane) # Computes where those points project in screen coordinates. 
pixel_position, _ = glm.model_to_screen(convex_combination, model_to_eye_matrix, perspective_matrix, image_size, bottom_left) prediction = glm.perspective_correct_barycentrics(triangles, pixel_position[..., 0:2], model_to_eye_matrix, perspective_matrix, image_size, bottom_left) self.assertAllClose(prediction, barycentric_weights) def test_perspective_correct_barycentrics_jacobian_random(self): """Tests the Jacobian of perspective_correct_barycentrics.""" tensor_size = np.random.randint(1, 3) tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() vertices_init = np.random.uniform(size=tensor_shape + [3, 3]) pixel_position_init = np.random.uniform(size=tensor_shape + [2]) camera_position_init = np.random.uniform(size=tensor_shape + [3, 3]) look_at_init = np.random.uniform(size=tensor_shape + [3, 3]) up_vector_init = np.random.uniform(size=tensor_shape + [3, 3]) vertical_field_of_view_init = np.random.uniform( 0.1, 1.0, size=tensor_shape + [3, 1]) screen_dimensions_init = np.random.uniform( 1.0, 10.0, size=tensor_shape + [3, 2]) near_init = np.random.uniform(1.0, 10.0, size=tensor_shape + [3, 1]) far_init = near_init + np.random.uniform( 0.1, 1.0, size=tensor_shape + [3, 1]) lower_left_corner_init = np.random.uniform(size=tensor_shape + [3, 2]) # Build matrices. model_to_eye_matrix_init = look_at.right_handed(camera_position_init, look_at_init, up_vector_init) perspective_matrix_init = perspective.right_handed( vertical_field_of_view_init, screen_dimensions_init[..., 0:1] / screen_dimensions_init[..., 1:2], near_init, far_init) self.assert_jacobian_is_correct_fn( glm.perspective_correct_barycentrics, [ vertices_init, pixel_position_init, model_to_eye_matrix_init, perspective_matrix_init, screen_dimensions_init, lower_left_corner_init ], atol=1e-4) @parameterized.parameters( ((3, 7), (3,)), ((2, 3, 7), (2, 3)), ((None, 3, 7), (None, 3)), ) def test_interpolate_attributes_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(glm.interpolate_attributes, shapes) @parameterized.parameters( ("must have exactly 3 dimensions in axis -2", (2, 7), (3,)), ("must have exactly 3 dimensions in axis -1", (3, 7), (2,)), ("Not all batch dimensions are broadcast-compatible", (5, 3, 7), (4, 3)), ) def test_interpolate_attributes_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(glm.interpolate_attributes, error_msg, shapes) def test_interpolate_attributes_random(self): """Checks the output of interpolate_attributes.""" attributes = np.random.uniform(-1.0, 1.0, size=(3,)) barycentric = np.random.uniform(0.0, 1.0, size=(3,)) barycentric = barycentric / np.linalg.norm( barycentric, axis=-1, ord=1, keepdims=True) groundtruth = np.sum(attributes * barycentric, keepdims=True) attributes = np.reshape(attributes, (3, 1)) prediction = glm.interpolate_attributes(attributes, barycentric) self.assertAllClose(groundtruth, prediction) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_interpolate_attributes_jacobian_random(self): """Tests the jacobian of interpolate_attributes.""" batch_size = np.random.randint(1, 5) attributes = np.random.uniform(-1.0, 1.0, size=(batch_size, 3, 1)) barycentric = np.random.uniform( 0.0, 1.0, size=( batch_size, 3, )) barycentric = barycentric / np.linalg.norm( barycentric, axis=-1, ord=1, keepdims=True) self.assert_jacobian_is_correct_fn(glm.interpolate_attributes, [attributes, barycentric]) if __name__ == 
"__main__": test_case.main()
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/util/shape.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Shape utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import numpy as np import six import tensorflow as tf def _broadcast_shape_helper(shape_x, shape_y): """Helper function for is_broadcast_compatible and broadcast_shape. Args: shape_x: A `TensorShape`. shape_y: A `TensorShape`. Returns: Returns None if the shapes are not broadcast compatible, or a list containing the broadcasted dimensions otherwise. """ # To compute the broadcasted dimensions, we zip together shape_x and shape_y, # and pad with 1 to make them the same length. broadcasted_dims = reversed( list( six.moves.zip_longest( reversed(shape_x.dims), reversed(shape_y.dims), fillvalue=tf.compat.v1.Dimension(1)))) # Next we combine the dimensions according to the numpy broadcasting rules. # http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html return_dims = [] for (dim_x, dim_y) in broadcasted_dims: if dim_x.value is None or dim_y.value is None: # One or both dimensions is unknown. If either dimension is greater than # 1, we assume that the program is correct, and the other dimension will # be broadcast to match it. if dim_x.value is not None and dim_x.value > 1: return_dims.append(dim_x) elif dim_y.value is not None and dim_y.value > 1: return_dims.append(dim_y) else: return_dims.append(None) elif dim_x.value == 1: # We will broadcast dim_x to dim_y. return_dims.append(dim_y) elif dim_y.value == 1: # We will broadcast dim_y to dim_x. return_dims.append(dim_x) elif dim_x.value == dim_y.value: # The dimensions are compatible, so output is the same size in that # dimension. return_dims.append(dim_x.merge_with(dim_y)) else: return None return return_dims def is_broadcast_compatible(shape_x, shape_y): """Returns True if `shape_x` and `shape_y` are broadcast compatible. Args: shape_x: A `TensorShape`. shape_y: A `TensorShape`. Returns: True if a shape exists that both `shape_x` and `shape_y` can be broadcasted to. False otherwise. """ if shape_x.ndims is None or shape_y.ndims is None: return False return _broadcast_shape_helper(shape_x, shape_y) is not None def get_broadcasted_shape(shape_x, shape_y): """Returns the common shape for broadcast compatible shapes. Args: shape_x: A `TensorShape`. shape_y: A `TensorShape`. Returns: Returns None if the shapes are not broadcast compatible, or a list containing the broadcasted dimensions otherwise. 
""" if shape_x.ndims is None or shape_y.ndims is None: return None return _broadcast_shape_helper(shape_x, shape_y) def _check_type(variable, variable_name, expected_type): """Helper function for checking that inputs are of expected types.""" if isinstance(expected_type, (list, tuple)): expected_type_name = 'list or tuple' else: expected_type_name = expected_type.__name__ if not isinstance(variable, expected_type): raise ValueError('{} must be of type {}, but it is {}'.format( variable_name, expected_type_name, type(variable).__name__)) def _fix_axis_dim_pairs(pairs, name): """Helper function to make `pairs` a list if needed.""" if isinstance(pairs[0], int): pairs = [pairs] for pair in pairs: if len(pair) != 2: raise ValueError( '{} must consist of axis-value pairs, but found {}'.format( name, pair)) return pairs def _get_dim(tensor, axis): """Returns dimensionality of a tensor for a given axis.""" return tf.compat.v1.dimension_value(tensor.shape[axis]) def check_static(tensor, has_rank=None, has_rank_greater_than=None, has_rank_less_than=None, has_dim_equals=None, has_dim_greater_than=None, has_dim_less_than=None, tensor_name='tensor'): """Checks static shapes for rank and dimension constraints. This function can be used to check a tensor's shape for multiple rank and dimension constraints at the same time. Args: tensor: Any tensor with a static shape. has_rank: An int or `None`. If not `None`, the function checks if the rank of the `tensor` equals to `has_rank`. has_rank_greater_than: An int or `None`. If not `None`, the function checks if the rank of the `tensor` is greater than `has_rank_greater_than`. has_rank_less_than: An int or `None`. If not `None`, the function checks if the rank of the `tensor` is less than `has_rank_less_than`. has_dim_equals: Either a tuple or list containing a single pair of `int`s, or a list or tuple containing multiple such pairs. Each pair is in the form (`axis`, `dim`), which means the function should check if `tensor.shape[axis] == dim`. has_dim_greater_than: Either a tuple or list containing a single pair of `int`s, or a list or tuple containing multiple such pairs. Each pair is in the form (`axis`, `dim`), which means the function should check if `tensor.shape[axis] > dim`. has_dim_less_than: Either a tuple or list containing a single pair of `int`s, or a list or tuple containing multiple such pairs. Each pair is in the form (`axis`, `dim`), which means the function should check if `tensor.shape[axis] < dim`. tensor_name: A name for `tensor` to be used in the error message if one is thrown. Raises: ValueError: If any input is not of the expected types, or if one of the checks described above fails. 
""" rank = tensor.shape.ndims def _raise_value_error_for_rank(variable, error_msg): raise ValueError( '{} must have a rank {} {}, but it has rank {} and shape {}'.format( tensor_name, error_msg, variable, rank, tensor.shape.as_list())) def _raise_value_error_for_dim(tensor_name, error_msg, axis, value): raise ValueError( '{} must have {} {} dimensions in axis {}, but it has shape {}'.format( tensor_name, error_msg, value, axis, tensor.shape.as_list())) if has_rank is not None: _check_type(has_rank, 'has_rank', int) if rank != has_rank: _raise_value_error_for_rank(has_rank, 'of') if has_rank_greater_than is not None: _check_type(has_rank_greater_than, 'has_rank_greater_than', int) if rank <= has_rank_greater_than: _raise_value_error_for_rank(has_rank_greater_than, 'greater than') if has_rank_less_than is not None: _check_type(has_rank_less_than, 'has_rank_less_than', int) if rank >= has_rank_less_than: _raise_value_error_for_rank(has_rank_less_than, 'less than') if has_dim_equals is not None: _check_type(has_dim_equals, 'has_dim_equals', (list, tuple)) has_dim_equals = _fix_axis_dim_pairs(has_dim_equals, 'has_dim_equals') for axis, value in has_dim_equals: if _get_dim(tensor, axis) != value: _raise_value_error_for_dim(tensor_name, 'exactly', axis, value) if has_dim_greater_than is not None: _check_type(has_dim_greater_than, 'has_dim_greater_than', (list, tuple)) has_dim_greater_than = _fix_axis_dim_pairs(has_dim_greater_than, 'has_dim_greater_than') for axis, value in has_dim_greater_than: if not _get_dim(tensor, axis) > value: _raise_value_error_for_dim(tensor_name, 'greater than', axis, value) if has_dim_less_than is not None: _check_type(has_dim_less_than, 'has_dim_less_than', (list, tuple)) has_dim_less_than = _fix_axis_dim_pairs(has_dim_less_than, 'has_dim_less_than') for axis, value in has_dim_less_than: if not _get_dim(tensor, axis) < value: _raise_value_error_for_dim(tensor_name, 'less than', axis, value) def _check_tensors(tensors, tensors_name): """Helper function to check the type and length of tensors.""" _check_type(tensors, tensors_name, (list, tuple)) if len(tensors) < 2: raise ValueError('At least 2 tensors are required.') def _check_tensor_axis_lists(tensors, tensors_name, axes, axes_name): """Helper function to check that lengths of `tensors` and `axes` match.""" _check_type(axes, axes_name, (list, tuple)) if len(tensors) != len(axes): raise ValueError( '{} and {} must have the same length, but are {} and {}.'.format( tensors_name, axes_name, len(tensors), len(axes))) def _fix_axes(tensors, axes, allow_negative): """Makes all axes positive and checks for out of bound errors.""" axes = [ axis + tensor.shape.ndims if axis < 0 else axis for tensor, axis in zip(tensors, axes) ] if not all( ((allow_negative or (not allow_negative and axis >= 0)) and axis < tensor.shape.ndims) for tensor, axis in zip(tensors, axes)): rank_axis_pairs = zip([tensor.shape.ndims for tensor in tensors], axes) raise ValueError( 'Some axes are out of bounds. 
Given rank-axes pairs: {}'.format( [pair for pair in rank_axis_pairs])) return axes def _give_default_names(list_of_objects, name): """Helper function to give default names to objects for error messages.""" return [name + '_' + str(index) for index in range(len(list_of_objects))] def _all_are_equal(list_of_objects): """Helper function to check if all the items in a list are the same.""" if not list_of_objects: return True if isinstance(list_of_objects[0], list): list_of_objects = [tuple(obj) for obj in list_of_objects] return len(set(list_of_objects)) == 1 def _raise_error(tensor_names, batch_shapes): formatted_list = [(name, batch_shape) for name, batch_shape in zip(tensor_names, batch_shapes)] raise ValueError( 'Not all batch dimensions are identical: {}'.format(formatted_list)) def compare_batch_dimensions(tensors, last_axes, broadcast_compatible, initial_axes=0, tensor_names=None): """Compares batch dimensions for tensors with static shapes. Args: tensors: A list or tuple of tensors with static shapes to compare. last_axes: An `int` or a list or tuple of `int`s with the same length as `tensors`. If an `int`, it is assumed to be the same for all the tensors. Each entry should correspond to the last axis of the batch (with zero based indices). For instance, if there is only a single batch dimension, last axis should be `0`. broadcast_compatible: A 'bool', whether the batch shapes can be broadcast compatible in the numpy sense. initial_axes: An `int` or a list or tuple of `int`s with the same length as `tensors`. If an `int`, it is assumed to be the same for all the tensors. Each entry should correspond to the first axis of the batch (with zero based indices). Default value is `0`. tensor_names: Names of `tensors` to be used in the error message if one is thrown. If left as `None`, `tensor_i` is used. Raises: ValueError: If inputs have unexpected types, or if given axes are out of bounds, or if the check fails. """ _check_tensors(tensors, 'tensors') if isinstance(initial_axes, int): initial_axes = [initial_axes] * len(tensors) if isinstance(last_axes, int): last_axes = [last_axes] * len(tensors) _check_tensor_axis_lists(tensors, 'tensors', initial_axes, 'initial_axes') _check_tensor_axis_lists(tensors, 'tensors', last_axes, 'last_axes') initial_axes = _fix_axes(tensors, initial_axes, allow_negative=True) last_axes = _fix_axes(tensors, last_axes, allow_negative=True) batch_shapes = [ tensor.shape[init:last + 1] for tensor, init, last in zip(tensors, initial_axes, last_axes) ] if tensor_names is None: tensor_names = _give_default_names(tensors, 'tensor') if not broadcast_compatible: batch_ndims = [batch_shape.ndims for batch_shape in batch_shapes] batch_shapes = [batch_shape.as_list() for batch_shape in batch_shapes] if not _all_are_equal(batch_ndims): # If not all batch shapes have the same length, they cannot be identical. _raise_error(tensor_names, batch_shapes) for dims in zip(*batch_shapes): if _all_are_equal(dims): # Continue if all dimensions are None or have the same value. continue if None not in dims: # If all dimensions are known at this point, they are not identical. _raise_error(tensor_names, batch_shapes) # At this point dims must consist of both None's and int's. if len(set(dims)) != 2: # set(dims) should return (None, some_int). # Otherwise shapes are not identical. 
_raise_error(tensor_names, batch_shapes) else: if not all( is_broadcast_compatible(shape1, shape2) for shape1, shape2 in itertools.combinations(batch_shapes, 2)): raise ValueError( 'Not all batch dimensions are broadcast-compatible: {}'.format([ (name, batch_shape.as_list()) for name, batch_shape in zip(tensor_names, batch_shapes) ])) def compare_dimensions(tensors, axes, tensor_names=None): """Compares dimensions of tensors with static or dynamic shapes. Args: tensors: A list or tuple of tensors to compare. axes: An `int` or a list or tuple of `int`s with the same length as `tensors`. If an `int`, it is assumed to be the same for all the tensors. Each entry should correspond to the axis of the tensor being compared. tensor_names: Names of `tensors` to be used in the error message if one is thrown. If left as `None`, their `Tensor.name` fields are used instead. Raises: ValueError: If inputs have unexpected types, or if given axes are out of bounds, or if the check fails. """ _check_tensors(tensors, 'tensors') if isinstance(axes, int): axes = [axes] * len(tensors) _check_tensor_axis_lists(tensors, 'tensors', axes, 'axes') axes = _fix_axes(tensors, axes, allow_negative=False) if tensor_names is None: tensor_names = _give_default_names(tensors, 'tensor') dimensions = [_get_dim(tensor, axis) for tensor, axis in zip(tensors, axes)] if not _all_are_equal(dimensions): raise ValueError('Tensors {} must have the same number of dimensions in ' 'axes {}, but they are {}.'.format( list(tensor_names), list(axes), list(dimensions))) def is_static(tensor_shape): """Checks if the given tensor shape is static.""" if isinstance(tensor_shape, (list, tuple)): return None not in tensor_shape else: return None not in tensor_shape.as_list() def add_batch_dimensions(tensor, tensor_name, batch_shape, last_axis=None): """Broadcasts tensor to match batch dimensions. It will either broadcast to all provided batch dimensions, therefore increasing the tensor shape by len(batch_shape) dimensions, or will do nothing if the batch dimensions are already present and equal to the expected batch dimensions. Args: tensor: A tensor to broadcast of a shape [A1, ..., An, B1, ..., Bn], where [A1, ..., An] are batch dimensions (it is allowed to have no batch dimensions), and [B1, ..., Bn] are other tensor dimensions. If [A1, ..., An] are present but different from the values in `batch_shape`, an error will be thrown. tensor_name: Name of `tensor` to be used in the error message if one is thrown. batch_shape: A list of `int`s representing the desired batch dimensions. last_axis: An `int` corresponding to the last axis of the batch (with zero based indices). For instance, if there is only a single batch dimension, the last axis should be `0`. If there are no batch dimensions, it must be set to `None`. Returns: Tensor of a shape `batch_shape` + [B1, ..., Bn] or the unmodified tensor if `batch_shape` = [A1, ..., An]. Raises: ValueError: If `tensor` already has batch dimensions different from the desired ones. """ if last_axis is not None: last_axis = _fix_axes([tensor], [last_axis], allow_negative=True)[0] tensor_batch_shape = tensor.shape.as_list()[:last_axis + 1] if np.array_equal(tensor_batch_shape, batch_shape): return tensor elif tensor_batch_shape: raise ValueError( 'Tensor {} has batch dimensions different from target ' 'one. Found {}, but expected no batch dimensions or {}'.format( tensor_name, tensor.shape[:last_axis + 1], batch_shape)) return tf.broadcast_to(tensor, batch_shape + list(tensor.shape)) # The util functions or classes are not exported. __all__ = []
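To make the checks above concrete, here is a minimal usage sketch of `check_static` (assumptions: TensorFlow 2 with eager execution; the tensor and its name are hypothetical).

import tensorflow as tf
from tensorflow_graphics.util.shape import check_static

triangles = tf.zeros((8, 3, 3))  # a hypothetical batch of 8 triangles
# Passes silently: rank is 3 and the last two axes both have dimension 3.
check_static(tensor=triangles, tensor_name='triangles', has_rank=3,
             has_dim_equals=[(-1, 3), (-2, 3)])
# check_static(tensor=triangles, tensor_name='triangles', has_rank=2)
# would raise ValueError, reporting the actual rank and shape [8, 3, 3].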
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Shape utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import numpy as np import six import tensorflow as tf def _broadcast_shape_helper(shape_x, shape_y): """Helper function for is_broadcast_compatible and broadcast_shape. Args: shape_x: A `TensorShape`. shape_y: A `TensorShape`. Returns: Returns None if the shapes are not broadcast compatible, or a list containing the broadcasted dimensions otherwise. """ # To compute the broadcasted dimensions, we zip together shape_x and shape_y, # and pad with 1 to make them the same length. broadcasted_dims = reversed( list( six.moves.zip_longest( reversed(shape_x.dims), reversed(shape_y.dims), fillvalue=tf.compat.v1.Dimension(1)))) # Next we combine the dimensions according to the numpy broadcasting rules. # http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html return_dims = [] for (dim_x, dim_y) in broadcasted_dims: if dim_x.value is None or dim_y.value is None: # One or both dimensions is unknown. If either dimension is greater than # 1, we assume that the program is correct, and the other dimension will # be broadcast to match it. if dim_x.value is not None and dim_x.value > 1: return_dims.append(dim_x) elif dim_y.value is not None and dim_y.value > 1: return_dims.append(dim_y) else: return_dims.append(None) elif dim_x.value == 1: # We will broadcast dim_x to dim_y. return_dims.append(dim_y) elif dim_y.value == 1: # We will broadcast dim_y to dim_x. return_dims.append(dim_x) elif dim_x.value == dim_y.value: # The dimensions are compatible, so output is the same size in that # dimension. return_dims.append(dim_x.merge_with(dim_y)) else: return None return return_dims def is_broadcast_compatible(shape_x, shape_y): """Returns True if `shape_x` and `shape_y` are broadcast compatible. Args: shape_x: A `TensorShape`. shape_y: A `TensorShape`. Returns: True if a shape exists that both `shape_x` and `shape_y` can be broadcasted to. False otherwise. """ if shape_x.ndims is None or shape_y.ndims is None: return False return _broadcast_shape_helper(shape_x, shape_y) is not None def get_broadcasted_shape(shape_x, shape_y): """Returns the common shape for broadcast compatible shapes. Args: shape_x: A `TensorShape`. shape_y: A `TensorShape`. Returns: Returns None if the shapes are not broadcast compatible, or a list containing the broadcasted dimensions otherwise. 
""" if shape_x.ndims is None or shape_y.ndims is None: return None return _broadcast_shape_helper(shape_x, shape_y) def _check_type(variable, variable_name, expected_type): """Helper function for checking that inputs are of expected types.""" if isinstance(expected_type, (list, tuple)): expected_type_name = 'list or tuple' else: expected_type_name = expected_type.__name__ if not isinstance(variable, expected_type): raise ValueError('{} must be of type {}, but it is {}'.format( variable_name, expected_type_name, type(variable).__name__)) def _fix_axis_dim_pairs(pairs, name): """Helper function to make `pairs` a list if needed.""" if isinstance(pairs[0], int): pairs = [pairs] for pair in pairs: if len(pair) != 2: raise ValueError( '{} must consist of axis-value pairs, but found {}'.format( name, pair)) return pairs def _get_dim(tensor, axis): """Returns dimensionality of a tensor for a given axis.""" return tf.compat.v1.dimension_value(tensor.shape[axis]) def check_static(tensor, has_rank=None, has_rank_greater_than=None, has_rank_less_than=None, has_dim_equals=None, has_dim_greater_than=None, has_dim_less_than=None, tensor_name='tensor'): """Checks static shapes for rank and dimension constraints. This function can be used to check a tensor's shape for multiple rank and dimension constraints at the same time. Args: tensor: Any tensor with a static shape. has_rank: An int or `None`. If not `None`, the function checks if the rank of the `tensor` equals to `has_rank`. has_rank_greater_than: An int or `None`. If not `None`, the function checks if the rank of the `tensor` is greater than `has_rank_greater_than`. has_rank_less_than: An int or `None`. If not `None`, the function checks if the rank of the `tensor` is less than `has_rank_less_than`. has_dim_equals: Either a tuple or list containing a single pair of `int`s, or a list or tuple containing multiple such pairs. Each pair is in the form (`axis`, `dim`), which means the function should check if `tensor.shape[axis] == dim`. has_dim_greater_than: Either a tuple or list containing a single pair of `int`s, or a list or tuple containing multiple such pairs. Each pair is in the form (`axis`, `dim`), which means the function should check if `tensor.shape[axis] > dim`. has_dim_less_than: Either a tuple or list containing a single pair of `int`s, or a list or tuple containing multiple such pairs. Each pair is in the form (`axis`, `dim`), which means the function should check if `tensor.shape[axis] < dim`. tensor_name: A name for `tensor` to be used in the error message if one is thrown. Raises: ValueError: If any input is not of the expected types, or if one of the checks described above fails. 
""" rank = tensor.shape.ndims def _raise_value_error_for_rank(variable, error_msg): raise ValueError( '{} must have a rank {} {}, but it has rank {} and shape {}'.format( tensor_name, error_msg, variable, rank, tensor.shape.as_list())) def _raise_value_error_for_dim(tensor_name, error_msg, axis, value): raise ValueError( '{} must have {} {} dimensions in axis {}, but it has shape {}'.format( tensor_name, error_msg, value, axis, tensor.shape.as_list())) if has_rank is not None: _check_type(has_rank, 'has_rank', int) if rank != has_rank: _raise_value_error_for_rank(has_rank, 'of') if has_rank_greater_than is not None: _check_type(has_rank_greater_than, 'has_rank_greater_than', int) if rank <= has_rank_greater_than: _raise_value_error_for_rank(has_rank_greater_than, 'greater than') if has_rank_less_than is not None: _check_type(has_rank_less_than, 'has_rank_less_than', int) if rank >= has_rank_less_than: _raise_value_error_for_rank(has_rank_less_than, 'less than') if has_dim_equals is not None: _check_type(has_dim_equals, 'has_dim_equals', (list, tuple)) has_dim_equals = _fix_axis_dim_pairs(has_dim_equals, 'has_dim_equals') for axis, value in has_dim_equals: if _get_dim(tensor, axis) != value: _raise_value_error_for_dim(tensor_name, 'exactly', axis, value) if has_dim_greater_than is not None: _check_type(has_dim_greater_than, 'has_dim_greater_than', (list, tuple)) has_dim_greater_than = _fix_axis_dim_pairs(has_dim_greater_than, 'has_dim_greater_than') for axis, value in has_dim_greater_than: if not _get_dim(tensor, axis) > value: _raise_value_error_for_dim(tensor_name, 'greater than', axis, value) if has_dim_less_than is not None: _check_type(has_dim_less_than, 'has_dim_less_than', (list, tuple)) has_dim_less_than = _fix_axis_dim_pairs(has_dim_less_than, 'has_dim_less_than') for axis, value in has_dim_less_than: if not _get_dim(tensor, axis) < value: _raise_value_error_for_dim(tensor_name, 'less than', axis, value) def _check_tensors(tensors, tensors_name): """Helper function to check the type and length of tensors.""" _check_type(tensors, tensors_name, (list, tuple)) if len(tensors) < 2: raise ValueError('At least 2 tensors are required.') def _check_tensor_axis_lists(tensors, tensors_name, axes, axes_name): """Helper function to check that lengths of `tensors` and `axes` match.""" _check_type(axes, axes_name, (list, tuple)) if len(tensors) != len(axes): raise ValueError( '{} and {} must have the same length, but are {} and {}.'.format( tensors_name, axes_name, len(tensors), len(axes))) def _fix_axes(tensors, axes, allow_negative): """Makes all axes positive and checks for out of bound errors.""" axes = [ axis + tensor.shape.ndims if axis < 0 else axis for tensor, axis in zip(tensors, axes) ] if not all( ((allow_negative or (not allow_negative and axis >= 0)) and axis < tensor.shape.ndims) for tensor, axis in zip(tensors, axes)): rank_axis_pairs = zip([tensor.shape.ndims for tensor in tensors], axes) raise ValueError( 'Some axes are out of bounds. 
Given rank-axes pairs: {}'.format( [pair for pair in rank_axis_pairs])) return axes def _give_default_names(list_of_objects, name): """Helper function to give default names to objects for error messages.""" return [name + '_' + str(index) for index in range(len(list_of_objects))] def _all_are_equal(list_of_objects): """Helper function to check if all the items in a list are the same.""" if not list_of_objects: return True if isinstance(list_of_objects[0], list): list_of_objects = [tuple(obj) for obj in list_of_objects] return len(set(list_of_objects)) == 1 def _raise_error(tensor_names, batch_shapes): formatted_list = [(name, batch_shape) for name, batch_shape in zip(tensor_names, batch_shapes)] raise ValueError( 'Not all batch dimensions are identical: {}'.format(formatted_list)) def compare_batch_dimensions(tensors, last_axes, broadcast_compatible, initial_axes=0, tensor_names=None): """Compares batch dimensions for tensors with static shapes. Args: tensors: A list or tuple of tensors with static shapes to compare. last_axes: An `int` or a list or tuple of `int`s with the same length as `tensors`. If an `int`, it is assumed to be the same for all the tensors. Each entry should correspond to the last axis of the batch (with zero based indices). For instance, if there is only a single batch dimension, last axis should be `0`. broadcast_compatible: A 'bool', whether the batch shapes can be broadcast compatible in the numpy sense. initial_axes: An `int` or a list or tuple of `int`s with the same length as `tensors`. If an `int`, it is assumed to be the same for all the tensors. Each entry should correspond to the first axis of the batch (with zero based indices). Default value is `0`. tensor_names: Names of `tensors` to be used in the error message if one is thrown. If left as `None`, `tensor_i` is used. Raises: ValueError: If inputs have unexpected types, or if given axes are out of bounds, or if the check fails. """ _check_tensors(tensors, 'tensors') if isinstance(initial_axes, int): initial_axes = [initial_axes] * len(tensors) if isinstance(last_axes, int): last_axes = [last_axes] * len(tensors) _check_tensor_axis_lists(tensors, 'tensors', initial_axes, 'initial_axes') _check_tensor_axis_lists(tensors, 'tensors', last_axes, 'last_axes') initial_axes = _fix_axes(tensors, initial_axes, allow_negative=True) last_axes = _fix_axes(tensors, last_axes, allow_negative=True) batch_shapes = [ tensor.shape[init:last + 1] for tensor, init, last in zip(tensors, initial_axes, last_axes) ] if tensor_names is None: tensor_names = _give_default_names(tensors, 'tensor') if not broadcast_compatible: batch_ndims = [batch_shape.ndims for batch_shape in batch_shapes] batch_shapes = [batch_shape.as_list() for batch_shape in batch_shapes] if not _all_are_equal(batch_ndims): # If not all batch shapes have the same length, they cannot be identical. _raise_error(tensor_names, batch_shapes) for dims in zip(*batch_shapes): if _all_are_equal(dims): # Continue if all dimensions are None or have the same value. continue if None not in dims: # If all dimensions are known at this point, they are not identical. _raise_error(tensor_names, batch_shapes) # At this point dims must consist of both None's and int's. if len(set(dims)) != 2: # set(dims) should return (None, some_int). # Otherwise shapes are not identical. 
_raise_error(tensor_names, batch_shapes) else: if not all( is_broadcast_compatible(shape1, shape2) for shape1, shape2 in itertools.combinations(batch_shapes, 2)): raise ValueError( 'Not all batch dimensions are broadcast-compatible: {}'.format([ (name, batch_shape.as_list()) for name, batch_shape in zip(tensor_names, batch_shapes) ])) def compare_dimensions(tensors, axes, tensor_names=None): """Compares dimensions of tensors with static or dynamic shapes. Args: tensors: A list or tuple of tensors to compare. axes: An `int` or a list or tuple of `int`s with the same length as `tensors`. If an `int`, it is assumed to be the same for all the tensors. Each entry should correspond to the axis of the tensor being compared. tensor_names: Names of `tensors` to be used in the error message if one is thrown. If left as `None`, their `Tensor.name` fields are used instead. Raises: ValueError: If inputs have unexpected types, or if given axes are out of bounds, or if the check fails. """ _check_tensors(tensors, 'tensors') if isinstance(axes, int): axes = [axes] * len(tensors) _check_tensor_axis_lists(tensors, 'tensors', axes, 'axes') axes = _fix_axes(tensors, axes, allow_negative=False) if tensor_names is None: tensor_names = _give_default_names(tensors, 'tensor') dimensions = [_get_dim(tensor, axis) for tensor, axis in zip(tensors, axes)] if not _all_are_equal(dimensions): raise ValueError('Tensors {} must have the same number of dimensions in ' 'axes {}, but they are {}.'.format( list(tensor_names), list(axes), list(dimensions))) def is_static(tensor_shape): """Checks if the given tensor shape is static.""" if isinstance(tensor_shape, (list, tuple)): return None not in tensor_shape else: return None not in tensor_shape.as_list() def add_batch_dimensions(tensor, tensor_name, batch_shape, last_axis=None): """Broadcasts tensor to match batch dimensions. It will either broadcast to all provided batch dimensions, therefore increasing the tensor shape by len(batch_shape) dimensions, or will do nothing if the batch dimensions are already present and equal to the expected batch dimensions. Args: tensor: A tensor to broadcast of a shape [A1, ..., An, B1, ..., Bn], where [A1, ..., An] are batch dimensions (it is allowed to have no batch dimensions), and [B1, ..., Bn] are other tensor dimensions. If [A1, ..., An] are present but different from the values in `batch_shape`, an error will be thrown. tensor_name: Name of `tensor` to be used in the error message if one is thrown. batch_shape: A list of `int`s representing the desired batch dimensions. last_axis: An `int` corresponding to the last axis of the batch (with zero based indices). For instance, if there is only a single batch dimension, the last axis should be `0`. If there are no batch dimensions, it must be set to `None`. Returns: Tensor of a shape `batch_shape` + [B1, ..., Bn] or the unmodified tensor if `batch_shape` = [A1, ..., An]. Raises: ValueError: If `tensor` already has batch dimensions different from the desired ones. """ if last_axis is not None: last_axis = _fix_axes([tensor], [last_axis], allow_negative=True)[0] tensor_batch_shape = tensor.shape.as_list()[:last_axis + 1] if np.array_equal(tensor_batch_shape, batch_shape): return tensor elif tensor_batch_shape: raise ValueError( 'Tensor {} has batch dimensions different from target ' 'one. Found {}, but expected no batch dimensions or {}'.format( tensor_name, tensor.shape[:last_axis + 1], batch_shape)) return tf.broadcast_to(tensor, batch_shape + list(tensor.shape)) # The util functions or classes are not exported. __all__ = []
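A companion sketch for the batch and dimension comparisons above, under the same assumptions (TensorFlow 2, eager execution, hypothetical tensors).

import tensorflow as tf
from tensorflow_graphics.util.shape import compare_batch_dimensions, compare_dimensions

vertices = tf.zeros((8, 100, 3))              # [batch, V, 3]
faces = tf.zeros((8, 50, 3), dtype=tf.int32)  # [batch, F, 3]
# Both tensors carry the single batch dimension 8, ending at axis -3,
# so this passes; mismatched batch shapes would raise ValueError.
compare_batch_dimensions(tensors=(vertices, faces), last_axes=(-3, -3),
                         broadcast_compatible=False)
# Both tensors have dimension 3 along their last axis.
compare_dimensions(tensors=(vertices, faces), axes=(-1, -1))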
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/geometry/representation/mesh/sampler.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Computes a weighted point sampling of a triangular mesh. This op computes a uniform sampling of points on the surface of the mesh. Points are sampled from the surface of each triangle using a uniform distribution, proportional to a specified face density (e.g. face area). Uses the approach mentioned in the TOG 2002 paper "Shape distributions" (https://dl.acm.org/citation.cfm?id=571648) to generate random barycentric coordinates. This op can be used for several tasks, including better mesh reconstruction. For example, see these recent papers demonstrating reconstruction losses using this op: 1. "GEOMetrics: Exploiting Geometric Structure for Graph-Encoded Objects" (https://arxiv.org/abs/1901.11461) ICML 2019. 2. "Mesh R-CNN" (https://arxiv.org/abs/1906.02739) ICCV 2019. The op is differentiable w.r.t. mesh vertex positions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.geometry.representation import triangle from tensorflow_graphics.geometry.representation.mesh import normals from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def triangle_area(vertex0, vertex1, vertex2, name=None): """Computes triangle areas. Note: Computed triangle area = 0.5 * | e1 x e2 | where e1 and e2 are edges of the triangle. In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: vertex0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the first vertex of a triangle. vertex1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the second vertex of a triangle. vertex2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the third vertex of a triangle. name: A name for this op. Defaults to "triangle_area". Returns: A tensor of shape `[A1, ..., An]`, where each element represents the area of the corresponding triangle. """ with tf.compat.v1.name_scope(name, "triangle_area"): vertex0 = tf.convert_to_tensor(value=vertex0) vertex1 = tf.convert_to_tensor(value=vertex1) vertex2 = tf.convert_to_tensor(value=vertex2) triangle_normals = triangle.normal( vertex0, vertex1, vertex2, normalize=False) areas = 0.5 * tf.linalg.norm(tensor=triangle_normals, axis=-1) return areas def _random_categorical_sample(num_samples, weights, seed=None, stateless=False, name=None, sample_dtype=tf.int32): """Samples from a categorical distribution with arbitrary batch dimensions. Note: In the following, A1 to An are optional batch dimensions. Args: num_samples: An `int32` scalar denoting the number of samples to generate per mesh. weights: A `float` tensor of shape `[A1, ..., An, F]` where F is the number of faces. All weights must be > 0. seed: Optional random seed, value depends on `stateless`.
stateless: Optional flag to use stateless random sampler. If stateless=True, then `seed` must be provided as shape `[2]` int tensor. Stateless random sampling is useful for testing to generate the same reproducible sequence across calls. If stateless=False, then a stateful random number generator is used (default behavior). name: Name for op. Defaults to "random_categorical_sample". sample_dtype: Type of output samples. Returns: A `sample_dtype` tensor of shape `[A1, ..., An, num_samples]`. """ with tf.compat.v1.name_scope(name, "random_categorical_sample"): asserts.assert_all_above(weights, 0) logits = tf.math.log(weights) num_faces = tf.shape(input=logits)[-1] batch_shape = tf.shape(input=logits)[:-1] logits_2d = tf.reshape(logits, [-1, num_faces]) if stateless: seed = tf.convert_to_tensor(value=seed) shape.check_static( tensor=seed, tensor_name="seed", has_dim_equals=(-1, 2)) sample_fn = tf.random.stateless_categorical else: sample_fn = tf.random.categorical draws = sample_fn( logits=logits_2d, num_samples=num_samples, dtype=sample_dtype, seed=seed) samples = tf.reshape( draws, shape=tf.concat((batch_shape, (num_samples,)), axis=0)) return samples def generate_random_face_indices(num_samples, face_weights, seed=None, stateless=False, name=None): """Generate a sample of face ids given per face probability. Note: In the following, A1 to An are optional batch dimensions. Args: num_samples: An `int32` scalar denoting the number of samples to generate per mesh. face_weights: A `float` tensor of shape `[A1, ..., An, F]` where F is number of faces. All weights must be > 0. seed: Optional seed for the random number generator. stateless: Optional flag to use stateless random sampler. If stateless=True, then `seed` must be provided as shape `[2]` int tensor. Stateless random sampling is useful for testing to generate the same reproducible sequence across calls. If stateless=False, then a stateful random number generator is used (default behavior). name: Name for op. Defaults to "generate_random_face_indices". Returns: An `int32` tensor of shape `[A1, ..., An, num_samples]` denoting sampled face indices. """ with tf.compat.v1.name_scope(name, "generate_random_face_indices"): num_samples = tf.convert_to_tensor(value=num_samples) face_weights = tf.convert_to_tensor(value=face_weights) shape.check_static( tensor=face_weights, tensor_name="face_weights", has_rank_greater_than=0) shape.check_static( tensor=num_samples, tensor_name="num_samples", has_rank=0) face_weights = asserts.assert_all_above(face_weights, minval=0.0) eps = asserts.select_eps_for_division(face_weights.dtype) face_weights = face_weights + eps sampled_face_indices = _random_categorical_sample( num_samples=num_samples, weights=face_weights, seed=seed, stateless=stateless) return sampled_face_indices def generate_random_barycentric_coordinates(sample_shape, dtype=tf.dtypes.float32, seed=None, stateless=False, name=None): """Generate uniformly sampled random barycentric coordinates. Note: In the following, A1 to An are optional batch dimensions. Args: sample_shape: An `int` tensor with shape `[n+1,]` and values `(A1, ..., An, num_samples)` denoting total number of random samples drawn, where `n` is number of batch dimensions, and `num_samples` is the number of samples drawn for each mesh. dtype: Optional type of generated barycentric coordinates, defaults to float32. seed: An optional random seed. stateless: Optional flag to use stateless random sampler. If stateless=True, then `seed` must be provided as shape `[2]` int tensor. 
Stateless random sampling is useful for testing to generate the same reproducible sequence across calls. If stateless=False, then a stateful random number generator is used (default behavior). name: Name for op. Defaults to "generate_random_barycentric_coordinates". Returns: A `dtype` tensor of shape `[A1, ..., An, num_samples, 3]`, where the last dimension contains the sampled barycentric coordinates. """ with tf.compat.v1.name_scope(name, "generate_random_barycentric_coordinates"): sample_shape = tf.convert_to_tensor(value=sample_shape) shape.check_static( tensor=sample_shape, tensor_name="sample_shape", has_rank=1) sample_shape = tf.concat((sample_shape, (2,)), axis=0) if stateless: seed = tf.convert_to_tensor(value=seed) shape.check_static( tensor=seed, tensor_name="seed", has_dim_equals=(-1, 2)) sample_fn = tf.random.stateless_uniform else: sample_fn = tf.random.uniform random_uniform = sample_fn( shape=sample_shape, minval=0.0, maxval=1.0, dtype=dtype, seed=seed) random1 = tf.sqrt(random_uniform[..., 0]) random2 = random_uniform[..., 1] barycentric = tf.stack( (1 - random1, random1 * (1 - random2), random1 * random2), axis=-1) return barycentric def weighted_random_sample_triangle_mesh(vertex_attributes, faces, num_samples, face_weights, seed=None, stateless=False, name=None): """Performs a face probability weighted random sampling of a tri mesh. Note: In the following, A1 to An are optional batch dimensions. Args: vertex_attributes: A `float` tensor of shape `[A1, ..., An, V, D]`, where V is the number of vertices, and D is the dimensionality of each vertex. faces: An `int` tensor of shape `[A1, ..., An, F, 3]`, where F is the number of faces. num_samples: An `int` 0-D tensor denoting the number of samples to be drawn from each mesh. face_weights: A `float` tensor of shape `[A1, ..., An, F]`, denoting the unnormalized sampling probability of each face, where F is the number of faces. seed: Optional random seed. stateless: Optional flag to use stateless random sampler. If stateless=True, then seed must be provided as shape `[2]` int tensor. Stateless random sampling is useful for testing to generate the same sequence across calls. name: Name for op. Defaults to "weighted_random_sample_triangle_mesh". Returns: sample_points: A `float` tensor of shape `[A1, ..., An, num_samples, D]`, where D is the dimensionality of each sampled point. sample_face_indices: An `int` tensor of shape `[A1, ..., An, num_samples]`.
""" with tf.compat.v1.name_scope(name, "weighted_random_sample_triangle_mesh"): faces = tf.convert_to_tensor(value=faces) vertex_attributes = tf.convert_to_tensor(value=vertex_attributes) face_weights = tf.convert_to_tensor(value=face_weights) num_samples = tf.convert_to_tensor(value=num_samples) shape.check_static( tensor=vertex_attributes, tensor_name="vertex_attributes", has_rank_greater_than=1) shape.check_static( tensor=faces, tensor_name="faces", has_rank_greater_than=1) shape.check_static( tensor=face_weights, tensor_name="face_weights", has_rank_greater_than=0) shape.compare_batch_dimensions( tensors=(faces, face_weights), last_axes=(-2, -1), tensor_names=("faces", "face_weights"), broadcast_compatible=False) shape.compare_batch_dimensions( tensors=(vertex_attributes, faces, face_weights), last_axes=(-3, -3, -2), tensor_names=("vertex_attributes", "faces", "face_weights"), broadcast_compatible=False) asserts.assert_all_above(face_weights, 0) batch_dims = faces.shape.ndims - 2 batch_shape = faces.shape.as_list()[:-2] sample_shape = tf.concat( (batch_shape, tf.convert_to_tensor( value=(num_samples,), dtype=tf.int32)), axis=0) sample_face_indices = generate_random_face_indices( num_samples, face_weights, seed=seed, stateless=stateless) sample_vertex_indices = tf.gather( faces, sample_face_indices, batch_dims=batch_dims) sample_vertices = tf.gather( vertex_attributes, sample_vertex_indices, batch_dims=batch_dims) barycentric = generate_random_barycentric_coordinates( sample_shape, dtype=vertex_attributes.dtype, seed=seed, stateless=stateless) barycentric = tf.expand_dims(barycentric, axis=-1) sample_points = tf.math.multiply(sample_vertices, barycentric) sample_points = tf.reduce_sum(input_tensor=sample_points, axis=-2) return sample_points, sample_face_indices def area_weighted_random_sample_triangle_mesh(vertex_attributes, faces, num_samples, vertex_positions=None, seed=None, stateless=False, name=None): """Performs a face area weighted random sampling of a tri mesh. Note: In the following, A1 to An are optional batch dimensions. Args: vertex_attributes: A `float` tensor of shape `[A1, ..., An, V, D]`, where V is the number of vertices, and D is dimensionality of a feature defined on each vertex. If `vertex_positions` is not provided, then first 3 dimensions of `vertex_attributes` denote the vertex positions. faces: A `int` tensor of shape `[A1, ..., An, F, 3]`, where F is the number of faces. num_samples: An `int` scalar denoting number of samples to be drawn from each mesh. vertex_positions: An optional `float` tensor of shape `[A1, ..., An, V, 3]`, where V is the number of vertices. If None, then vertex_attributes[..., :3] is used as vertex positions. seed: Optional random seed. stateless: Optional flag to use stateless random sampler. If stateless=True, then seed must be provided as shape `[2]` int tensor. Stateless random sampling is useful for testing to generate same sequence across calls. name: Name for op. Defaults to "area_weighted_random_sample_triangle_mesh". Returns: sample_pts: A `float` tensor of shape `[A1, ..., An, num_samples, D]`, where D is dimensionality of each sampled point. sample_face_indices: A `int` tensor of shape `[A1, ..., An, num_samples]`. 
""" with tf.compat.v1.name_scope(name, "area_weighted_random_sample_triangle_mesh"): faces = tf.convert_to_tensor(value=faces) vertex_attributes = tf.convert_to_tensor(value=vertex_attributes) num_samples = tf.convert_to_tensor(value=num_samples) shape.check_static( tensor=vertex_attributes, tensor_name="vertex_attributes", has_rank_greater_than=1) shape.check_static( tensor=vertex_attributes, tensor_name="vertex_attributes", has_dim_greater_than=(-1, 2)) if vertex_positions is not None: vertex_positions = tf.convert_to_tensor(value=vertex_positions) else: vertex_positions = vertex_attributes[..., :3] shape.check_static( tensor=vertex_positions, tensor_name="vertex_positions", has_rank_greater_than=1) shape.check_static( tensor=vertex_positions, tensor_name="vertex_positions", has_dim_equals=(-1, 3)) triangle_vertex_positions = normals.gather_faces(vertex_positions, faces) triangle_areas = triangle_area(triangle_vertex_positions[..., 0, :], triangle_vertex_positions[..., 1, :], triangle_vertex_positions[..., 2, :]) return weighted_random_sample_triangle_mesh( vertex_attributes, faces, num_samples, face_weights=triangle_areas, seed=seed, stateless=stateless) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Computes a weighted point sampling of a triangular mesh. This op computes a uniform sampling of points on the surface of the mesh. Points are sampled from the surface of each triangle using a uniform distribution, proportional to a specified face density (e.g. face area). Uses the approach mentioned in the TOG 2002 paper "Shape distributions" (https://dl.acm.org/citation.cfm?id=571648) to generate random barycentric coordinates. This op can be used for several tasks, including better mesh reconstruction. For example, see these recent papers demonstrating reconstruction losses using this op: 1. "GEOMetrics: Exploiting Geometric Structure for Graph-Encoded Objects" (https://arxiv.org/abs/1901.11461) ICML 2019. 2. "Mesh R-CNN" (https://arxiv.org/abs/1906.02739) ICCV 2019. The op is differentiable w.r.t. mesh vertex positions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.geometry.representation import triangle from tensorflow_graphics.geometry.representation.mesh import normals from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def triangle_area(vertex0, vertex1, vertex2, name=None): """Computes triangle areas. Note: Computed triangle area = 0.5 * | e1 x e2 | where e1 and e2 are edges of the triangle. In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: vertex0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the first vertex of a triangle. vertex1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the second vertex of a triangle. vertex2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the third vertex of a triangle. name: A name for this op. Defaults to "triangle_area". Returns: A tensor of shape `[A1, ..., An]`, where each element represents the area of the corresponding triangle. """ with tf.compat.v1.name_scope(name, "triangle_area"): vertex0 = tf.convert_to_tensor(value=vertex0) vertex1 = tf.convert_to_tensor(value=vertex1) vertex2 = tf.convert_to_tensor(value=vertex2) triangle_normals = triangle.normal( vertex0, vertex1, vertex2, normalize=False) areas = 0.5 * tf.linalg.norm(tensor=triangle_normals, axis=-1) return areas def _random_categorical_sample(num_samples, weights, seed=None, stateless=False, name=None, sample_dtype=tf.int32): """Samples from a categorical distribution with arbitrary batch dimensions. Note: In the following, A1 to An are optional batch dimensions. Args: num_samples: An `int32` scalar denoting the number of samples to generate per mesh. weights: A `float` tensor of shape `[A1, ..., An, F]` where F is the number of faces. All weights must be > 0. seed: Optional random seed, value depends on `stateless`.
stateless: Optional flag to use stateless random sampler. If stateless=True, then `seed` must be provided as shape `[2]` int tensor. Stateless random sampling is useful for testing to generate the same reproducible sequence across calls. If stateless=False, then a stateful random number generator is used (default behavior). name: Name for op. Defaults to "random_categorical_sample". sample_dtype: Type of output samples. Returns: A `sample_dtype` tensor of shape `[A1, ..., An, num_samples]`. """ with tf.compat.v1.name_scope(name, "random_categorical_sample"): asserts.assert_all_above(weights, 0) logits = tf.math.log(weights) num_faces = tf.shape(input=logits)[-1] batch_shape = tf.shape(input=logits)[:-1] logits_2d = tf.reshape(logits, [-1, num_faces]) if stateless: seed = tf.convert_to_tensor(value=seed) shape.check_static( tensor=seed, tensor_name="seed", has_dim_equals=(-1, 2)) sample_fn = tf.random.stateless_categorical else: sample_fn = tf.random.categorical draws = sample_fn( logits=logits_2d, num_samples=num_samples, dtype=sample_dtype, seed=seed) samples = tf.reshape( draws, shape=tf.concat((batch_shape, (num_samples,)), axis=0)) return samples def generate_random_face_indices(num_samples, face_weights, seed=None, stateless=False, name=None): """Generate a sample of face ids given per face probability. Note: In the following, A1 to An are optional batch dimensions. Args: num_samples: An `int32` scalar denoting the number of samples to generate per mesh. face_weights: A `float` tensor of shape `[A1, ..., An, F]` where F is number of faces. All weights must be > 0. seed: Optional seed for the random number generator. stateless: Optional flag to use stateless random sampler. If stateless=True, then `seed` must be provided as shape `[2]` int tensor. Stateless random sampling is useful for testing to generate the same reproducible sequence across calls. If stateless=False, then a stateful random number generator is used (default behavior). name: Name for op. Defaults to "generate_random_face_indices". Returns: An `int32` tensor of shape `[A1, ..., An, num_samples]` denoting sampled face indices. """ with tf.compat.v1.name_scope(name, "generate_random_face_indices"): num_samples = tf.convert_to_tensor(value=num_samples) face_weights = tf.convert_to_tensor(value=face_weights) shape.check_static( tensor=face_weights, tensor_name="face_weights", has_rank_greater_than=0) shape.check_static( tensor=num_samples, tensor_name="num_samples", has_rank=0) face_weights = asserts.assert_all_above(face_weights, minval=0.0) eps = asserts.select_eps_for_division(face_weights.dtype) face_weights = face_weights + eps sampled_face_indices = _random_categorical_sample( num_samples=num_samples, weights=face_weights, seed=seed, stateless=stateless) return sampled_face_indices def generate_random_barycentric_coordinates(sample_shape, dtype=tf.dtypes.float32, seed=None, stateless=False, name=None): """Generate uniformly sampled random barycentric coordinates. Note: In the following, A1 to An are optional batch dimensions. Args: sample_shape: An `int` tensor with shape `[n+1,]` and values `(A1, ..., An, num_samples)` denoting total number of random samples drawn, where `n` is number of batch dimensions, and `num_samples` is the number of samples drawn for each mesh. dtype: Optional type of generated barycentric coordinates, defaults to float32. seed: An optional random seed. stateless: Optional flag to use stateless random sampler. If stateless=True, then `seed` must be provided as shape `[2]` int tensor. 
Stateless random sampling is useful for testing to generate the same reproducible sequence across calls. If stateless=False, then a stateful random number generator is used (default behavior). name: Name for op. Defaults to "generate_random_barycentric_coordinates". Returns: A `dtype` tensor of shape `[A1, ..., An, num_samples, 3]`, where the last dimension contains the sampled barycentric coordinates. """ with tf.compat.v1.name_scope(name, "generate_random_barycentric_coordinates"): sample_shape = tf.convert_to_tensor(value=sample_shape) shape.check_static( tensor=sample_shape, tensor_name="sample_shape", has_rank=1) sample_shape = tf.concat((sample_shape, (2,)), axis=0) if stateless: seed = tf.convert_to_tensor(value=seed) shape.check_static( tensor=seed, tensor_name="seed", has_dim_equals=(-1, 2)) sample_fn = tf.random.stateless_uniform else: sample_fn = tf.random.uniform random_uniform = sample_fn( shape=sample_shape, minval=0.0, maxval=1.0, dtype=dtype, seed=seed) random1 = tf.sqrt(random_uniform[..., 0]) random2 = random_uniform[..., 1] barycentric = tf.stack( (1 - random1, random1 * (1 - random2), random1 * random2), axis=-1) return barycentric def weighted_random_sample_triangle_mesh(vertex_attributes, faces, num_samples, face_weights, seed=None, stateless=False, name=None): """Performs a face probability weighted random sampling of a tri mesh. Note: In the following, A1 to An are optional batch dimensions. Args: vertex_attributes: A `float` tensor of shape `[A1, ..., An, V, D]`, where V is the number of vertices, and D is the dimensionality of each vertex. faces: An `int` tensor of shape `[A1, ..., An, F, 3]`, where F is the number of faces. num_samples: An `int` 0-D tensor denoting the number of samples to be drawn from each mesh. face_weights: A `float` tensor of shape `[A1, ..., An, F]`, denoting the unnormalized sampling probability of each face, where F is the number of faces. seed: Optional random seed. stateless: Optional flag to use stateless random sampler. If stateless=True, then seed must be provided as shape `[2]` int tensor. Stateless random sampling is useful for testing to generate the same sequence across calls. name: Name for op. Defaults to "weighted_random_sample_triangle_mesh". Returns: sample_points: A `float` tensor of shape `[A1, ..., An, num_samples, D]`, where D is the dimensionality of each sampled point. sample_face_indices: An `int` tensor of shape `[A1, ..., An, num_samples]`. 
""" with tf.compat.v1.name_scope(name, "weighted_random_sample_triangle_mesh"): faces = tf.convert_to_tensor(value=faces) vertex_attributes = tf.convert_to_tensor(value=vertex_attributes) face_weights = tf.convert_to_tensor(value=face_weights) num_samples = tf.convert_to_tensor(value=num_samples) shape.check_static( tensor=vertex_attributes, tensor_name="vertex_attributes", has_rank_greater_than=1) shape.check_static( tensor=faces, tensor_name="faces", has_rank_greater_than=1) shape.check_static( tensor=face_weights, tensor_name="face_weights", has_rank_greater_than=0) shape.compare_batch_dimensions( tensors=(faces, face_weights), last_axes=(-2, -1), tensor_names=("faces", "face_weights"), broadcast_compatible=False) shape.compare_batch_dimensions( tensors=(vertex_attributes, faces, face_weights), last_axes=(-3, -3, -2), tensor_names=("vertex_attributes", "faces", "face_weights"), broadcast_compatible=False) asserts.assert_all_above(face_weights, 0) batch_dims = faces.shape.ndims - 2 batch_shape = faces.shape.as_list()[:-2] sample_shape = tf.concat( (batch_shape, tf.convert_to_tensor( value=(num_samples,), dtype=tf.int32)), axis=0) sample_face_indices = generate_random_face_indices( num_samples, face_weights, seed=seed, stateless=stateless) sample_vertex_indices = tf.gather( faces, sample_face_indices, batch_dims=batch_dims) sample_vertices = tf.gather( vertex_attributes, sample_vertex_indices, batch_dims=batch_dims) barycentric = generate_random_barycentric_coordinates( sample_shape, dtype=vertex_attributes.dtype, seed=seed, stateless=stateless) barycentric = tf.expand_dims(barycentric, axis=-1) sample_points = tf.math.multiply(sample_vertices, barycentric) sample_points = tf.reduce_sum(input_tensor=sample_points, axis=-2) return sample_points, sample_face_indices def area_weighted_random_sample_triangle_mesh(vertex_attributes, faces, num_samples, vertex_positions=None, seed=None, stateless=False, name=None): """Performs a face area weighted random sampling of a tri mesh. Note: In the following, A1 to An are optional batch dimensions. Args: vertex_attributes: A `float` tensor of shape `[A1, ..., An, V, D]`, where V is the number of vertices, and D is dimensionality of a feature defined on each vertex. If `vertex_positions` is not provided, then first 3 dimensions of `vertex_attributes` denote the vertex positions. faces: A `int` tensor of shape `[A1, ..., An, F, 3]`, where F is the number of faces. num_samples: An `int` scalar denoting number of samples to be drawn from each mesh. vertex_positions: An optional `float` tensor of shape `[A1, ..., An, V, 3]`, where V is the number of vertices. If None, then vertex_attributes[..., :3] is used as vertex positions. seed: Optional random seed. stateless: Optional flag to use stateless random sampler. If stateless=True, then seed must be provided as shape `[2]` int tensor. Stateless random sampling is useful for testing to generate same sequence across calls. name: Name for op. Defaults to "area_weighted_random_sample_triangle_mesh". Returns: sample_pts: A `float` tensor of shape `[A1, ..., An, num_samples, D]`, where D is dimensionality of each sampled point. sample_face_indices: A `int` tensor of shape `[A1, ..., An, num_samples]`. 
""" with tf.compat.v1.name_scope(name, "area_weighted_random_sample_triangle_mesh"): faces = tf.convert_to_tensor(value=faces) vertex_attributes = tf.convert_to_tensor(value=vertex_attributes) num_samples = tf.convert_to_tensor(value=num_samples) shape.check_static( tensor=vertex_attributes, tensor_name="vertex_attributes", has_rank_greater_than=1) shape.check_static( tensor=vertex_attributes, tensor_name="vertex_attributes", has_dim_greater_than=(-1, 2)) if vertex_positions is not None: vertex_positions = tf.convert_to_tensor(value=vertex_positions) else: vertex_positions = vertex_attributes[..., :3] shape.check_static( tensor=vertex_positions, tensor_name="vertex_positions", has_rank_greater_than=1) shape.check_static( tensor=vertex_positions, tensor_name="vertex_positions", has_dim_equals=(-1, 3)) triangle_vertex_positions = normals.gather_faces(vertex_positions, faces) triangle_areas = triangle_area(triangle_vertex_positions[..., 0, :], triangle_vertex_positions[..., 1, :], triangle_vertex_positions[..., 2, :]) return weighted_random_sample_triangle_mesh( vertex_attributes, faces, num_samples, face_weights=triangle_areas, seed=seed, stateless=stateless) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/geometry/transformation/tests/linear_blend_skinning_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for linear blend skinning.""" # pylint: disable=line-too-long from absl.testing import flagsaver from absl.testing import parameterized import tensorflow as tf from tensorflow_graphics.geometry.transformation import linear_blend_skinning from tensorflow_graphics.geometry.transformation.tests import test_helpers from tensorflow_graphics.util import test_case class LinearBlendSkinningTest(test_case.TestCase): # pyformat: disable @parameterized.parameters( ((3,), (7,), (7, 3, 3), (7, 3)), ((None, 3), (None, 9), (None, 9, 3, 3), (None, 9, 3)), ((7, 1, 3), (1, 4, 11), (5, 11, 3, 3), (1, 11, 3)), ((7, 4, 3), (4, 11), (11, 3, 3), (11, 3)), ((3,), (5, 4, 11), (11, 3, 3), (11, 3)), ) # pyformat: enable def test_blend_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(linear_blend_skinning.blend, shapes) # pyformat: disable @parameterized.parameters( ("points must have exactly 3 dimensions in axis -1", (None,), (7,), (7, 3, 3), (7, 3)), ("bone_rotations must have a rank greater than 2", (3,), (7,), (3, 3), (3,)), ("bone_rotations must have exactly 3 dimensions in axis -1", (3,), (7,), (7, 3, None), (7, 3)), ("bone_rotations must have exactly 3 dimensions in axis -2", (3,), (7,), (7, None, 3), (7, 3)), ("bone_translations must have a rank greater than 1", (3,), (7,), (7, 3, 3), (3,)), ("bone_translations must have exactly 3 dimensions in axis -1", (3,), (7,), (7, 3, 3), (7, None)), (r"Tensors \[\'skinning_weights\', \'bone_rotations\'\] must have the same number of dimensions in axes", (3,), (9,), (7, 3, 3), (9, 3)), (r"Tensors \[\'skinning_weights\', \'bone_translations\'\] must have the same number of dimensions in axes", (3,), (9,), (9, 3, 3), (7, 3)), ("Not all batch dimensions are broadcast-compatible", (2, 3, 3), (3, 1, 7), (7, 3, 3), (7, 3)), ("Not all batch dimensions are broadcast-compatible", (2, 3, 3), (2, 1, 7), (3, 7, 3, 3), (2, 7, 3)), ) # pyformat: enable def test_blend_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(linear_blend_skinning.blend, error_msg, shapes) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_blend_jacobian_random(self): """Test the Jacobian of the blend function.""" (x_points_init, x_weights_init, x_rotations_init, x_translations_init) = test_helpers.generate_random_test_lbs_blend() self.assert_jacobian_is_correct_fn( linear_blend_skinning.blend, [x_points_init, x_weights_init, x_rotations_init, x_translations_init]) def test_blend_preset(self): """Checks that blend returns the expected value.""" (x_points_init, x_weights_init, x_rotations_init, x_translations_init, y_blended_points_init) = test_helpers.generate_preset_test_lbs_blend() x_points = tf.convert_to_tensor(value=x_points_init) x_weights = tf.convert_to_tensor(value=x_weights_init) x_rotations = tf.convert_to_tensor(value=x_rotations_init) 
x_translations = tf.convert_to_tensor(value=x_translations_init) y_blended_points = tf.convert_to_tensor(value=y_blended_points_init) y = linear_blend_skinning.blend(x_points, x_weights, x_rotations, x_translations) self.assertAllClose(y_blended_points, y) if __name__ == "__main__": test_case.main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for linear blend skinning.""" # pylint: disable=line-too-long from absl.testing import flagsaver from absl.testing import parameterized import tensorflow as tf from tensorflow_graphics.geometry.transformation import linear_blend_skinning from tensorflow_graphics.geometry.transformation.tests import test_helpers from tensorflow_graphics.util import test_case class LinearBlendSkinningTest(test_case.TestCase): # pyformat: disable @parameterized.parameters( ((3,), (7,), (7, 3, 3), (7, 3)), ((None, 3), (None, 9), (None, 9, 3, 3), (None, 9, 3)), ((7, 1, 3), (1, 4, 11), (5, 11, 3, 3), (1, 11, 3)), ((7, 4, 3), (4, 11), (11, 3, 3), (11, 3)), ((3,), (5, 4, 11), (11, 3, 3), (11, 3)), ) # pyformat: enable def test_blend_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(linear_blend_skinning.blend, shapes) # pyformat: disable @parameterized.parameters( ("points must have exactly 3 dimensions in axis -1", (None,), (7,), (7, 3, 3), (7, 3)), ("bone_rotations must have a rank greater than 2", (3,), (7,), (3, 3), (3,)), ("bone_rotations must have exactly 3 dimensions in axis -1", (3,), (7,), (7, 3, None), (7, 3)), ("bone_rotations must have exactly 3 dimensions in axis -2", (3,), (7,), (7, None, 3), (7, 3)), ("bone_translations must have a rank greater than 1", (3,), (7,), (7, 3, 3), (3,)), ("bone_translations must have exactly 3 dimensions in axis -1", (3,), (7,), (7, 3, 3), (7, None)), (r"Tensors \[\'skinning_weights\', \'bone_rotations\'\] must have the same number of dimensions in axes", (3,), (9,), (7, 3, 3), (9, 3)), (r"Tensors \[\'skinning_weights\', \'bone_translations\'\] must have the same number of dimensions in axes", (3,), (9,), (9, 3, 3), (7, 3)), ("Not all batch dimensions are broadcast-compatible", (2, 3, 3), (3, 1, 7), (7, 3, 3), (7, 3)), ("Not all batch dimensions are broadcast-compatible", (2, 3, 3), (2, 1, 7), (3, 7, 3, 3), (2, 7, 3)), ) # pyformat: enable def test_blend_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(linear_blend_skinning.blend, error_msg, shapes) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_blend_jacobian_random(self): """Test the Jacobian of the blend function.""" (x_points_init, x_weights_init, x_rotations_init, x_translations_init) = test_helpers.generate_random_test_lbs_blend() self.assert_jacobian_is_correct_fn( linear_blend_skinning.blend, [x_points_init, x_weights_init, x_rotations_init, x_translations_init]) def test_blend_preset(self): """Checks that blend returns the expected value.""" (x_points_init, x_weights_init, x_rotations_init, x_translations_init, y_blended_points_init) = test_helpers.generate_preset_test_lbs_blend() x_points = tf.convert_to_tensor(value=x_points_init) x_weights = tf.convert_to_tensor(value=x_weights_init) x_rotations = tf.convert_to_tensor(value=x_rotations_init) 
x_translations = tf.convert_to_tensor(value=x_translations_init) y_blended_points = tf.convert_to_tensor(value=y_blended_points_init) y = linear_blend_skinning.blend(x_points, x_weights, x_rotations, x_translations) self.assertAllClose(y_blended_points, y) if __name__ == "__main__": test_case.main()
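A hedged sketch (not part of the test file) of calling linear_blend_skinning.blend directly, with one point rigidly attached to a single bone; the shapes mirror the parameterized cases exercised by the tests above:

import tensorflow as tf
from tensorflow_graphics.geometry.transformation import linear_blend_skinning

point = tf.constant([1.0, 0.0, 0.0])  # Shape [3].
skinning_weights = tf.constant([1.0])  # Shape [W=1].
bone_rotations = tf.constant([[[0.0, -1.0, 0.0],
                               [1.0, 0.0, 0.0],
                               [0.0, 0.0, 1.0]]])  # Shape [1, 3, 3]: 90-degree rotation about z.
bone_translations = tf.constant([[0.0, 0.0, 0.0]])  # Shape [1, 3].
blended = linear_blend_skinning.blend(point, skinning_weights, bone_rotations,
                                      bone_translations)
# blended is approximately [0.0, 1.0, 0.0].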
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/io/triangle_mesh.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """A thin wrapper around the trimesh library for loading triangle meshes.""" import os import tensorflow as tf import trimesh from trimesh import Scene from trimesh import Trimesh # TODO(b/156115314): Revisit the library for loading the triangle meshes. class GFileResolver(trimesh.visual.resolvers.Resolver): """A resolver using gfile for accessing other assets in the mesh directory.""" def __init__(self, path): if tf.io.gfile.isdir(path): self.directory = path elif tf.io.gfile.exists(path): self.directory = os.path.dirname(path) else: raise ValueError('path is not a file or directory') def get(self, name): with tf.io.gfile.GFile(os.path.join(self.directory, name), 'rb') as f: data = f.read() return data def load(file_obj, file_type=None, **kwargs): """Loads a triangle mesh from the given GFile/file path. Args: file_obj: A tf.io.gfile.GFile object or a string specifying the mesh file path. file_type: A string specifying the type of the file (e.g. 'obj', 'stl'). If not specified the file_type will be inferred from the file name. **kwargs: Additional arguments that should be passed to trimesh.load(). Returns: A trimesh.Trimesh or trimesh.Scene. """ if isinstance(file_obj, str): with tf.io.gfile.GFile(file_obj, 'r') as f: if file_type is None: file_type = trimesh.util.split_extension(file_obj) return trimesh.load( file_obj=f, file_type=file_type, resolver=GFileResolver(file_obj), **kwargs) if trimesh.util.is_file(file_obj): if not hasattr(file_obj, 'name') or not file_obj.name: raise ValueError( 'file_obj must have attribute "name". Try passing the file name instead.' ) if file_type is None: file_type = trimesh.util.split_extension(file_obj.name) return trimesh.load( file_obj=file_obj, file_type=file_type, resolver=GFileResolver(file_obj.name), **kwargs) raise ValueError('file_obj should be either a file object or a string') __all__ = ['load', 'Trimesh', 'Scene']
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """A thin wrapper around the trimesh library for loading triangle meshes.""" import os import tensorflow as tf import trimesh from trimesh import Scene from trimesh import Trimesh # TODO(b/156115314): Revisit the library for loading the triangle meshes. class GFileResolver(trimesh.visual.resolvers.Resolver): """A resolver using gfile for accessing other assets in the mesh directory.""" def __init__(self, path): if tf.io.gfile.isdir(path): self.directory = path elif tf.io.gfile.exists(path): self.directory = os.path.dirname(path) else: raise ValueError('path is not a file or directory') def get(self, name): with tf.io.gfile.GFile(os.path.join(self.directory, name), 'rb') as f: data = f.read() return data def load(file_obj, file_type=None, **kwargs): """Loads a triangle mesh from the given GFile/file path. Args: file_obj: A tf.io.gfile.GFile object or a string specifying the mesh file path. file_type: A string specifying the type of the file (e.g. 'obj', 'stl'). If not specified the file_type will be inferred from the file name. **kwargs: Additional arguments that should be passed to trimesh.load(). Returns: A trimesh.Trimesh or trimesh.Scene. """ if isinstance(file_obj, str): with tf.io.gfile.GFile(file_obj, 'r') as f: if file_type is None: file_type = trimesh.util.split_extension(file_obj) return trimesh.load( file_obj=f, file_type=file_type, resolver=GFileResolver(file_obj), **kwargs) if trimesh.util.is_file(file_obj): if not hasattr(file_obj, 'name') or not file_obj.name: raise ValueError( 'file_obj must have attribute "name". Try passing the file name instead.' ) if file_type is None: file_type = trimesh.util.split_extension(file_obj.name) return trimesh.load( file_obj=file_obj, file_type=file_type, resolver=GFileResolver(file_obj.name), **kwargs) raise ValueError('file_obj should be either a file object or a string') __all__ = ['load', 'Trimesh', 'Scene']
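A short usage sketch of load() (not part of the original file); the mesh path is hypothetical:

from tensorflow_graphics.io import triangle_mesh

mesh = triangle_mesh.load('/path/to/mesh.obj')  # file_type inferred from the name.
if isinstance(mesh, triangle_mesh.Trimesh):
  print(mesh.vertices.shape, mesh.faces.shape)
else:
  # Files containing multiple geometries load as a trimesh.Scene.
  print(list(mesh.geometry.keys()))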
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/projects/nasa/lib/datasets.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Dataset implementations.""" from os import path import tensorflow.compat.v1 as tf tf.disable_eager_execution() def get_dataset(split, hparams): return dataset_dict[hparams.dataset](split, hparams) def amass(split, hparams): """Construct an AMASS data loader.""" def _input_fn(params): # pylint: disable=unused-argument # Dataset constants. n_bbox = 100000 n_surf = 100000 n_points = n_bbox + n_surf n_vert = 6890 n_frames = 1 # Parse parameters for global configurations. n_dims = hparams.n_dims data_dir = hparams.data_dir sample_bbox = hparams.sample_bbox sample_surf = hparams.sample_surf batch_size = hparams.batch_size subject = hparams.subject motion = hparams.motion n_parts = hparams.n_parts def _parse_tfrecord(serialized_example): fs = tf.parse_single_example( serialized_example, features={ 'point': tf.FixedLenFeature([n_frames * n_points * n_dims], tf.float32), 'label': tf.FixedLenFeature([n_frames * n_points * 1], tf.float32), 'vert': tf.FixedLenFeature([n_frames * n_vert * n_dims], tf.float32), 'weight': tf.FixedLenFeature([n_frames * n_vert * n_parts], tf.float32), 'transform': tf.FixedLenFeature( [n_frames * n_parts * (n_dims + 1) * (n_dims + 1)], tf.float32), 'joint': tf.FixedLenFeature([n_frames * n_parts * n_dims], tf.float32), 'name': tf.FixedLenFeature([], tf.string), }) fs['point'] = tf.reshape(fs['point'], [n_frames, n_points, n_dims]) fs['label'] = tf.reshape(fs['label'], [n_frames, n_points, 1]) fs['vert'] = tf.reshape(fs['vert'], [n_frames, n_vert, n_dims]) fs['weight'] = tf.reshape(fs['weight'], [n_frames, n_vert, n_parts]) fs['transform'] = tf.reshape(fs['transform'], [n_frames, n_parts, n_dims + 1, n_dims + 1]) fs['joint'] = tf.reshape(fs['joint'], [n_frames, n_parts, n_dims]) return fs def _sample_frame_points(fs): feature = {} for k, v in fs.items(): feature[k] = v points = feature['point'][0] labels = feature['label'][0] sample_points = [] sample_labels = [] if sample_bbox > 0: indices_bbox = tf.random.uniform([sample_bbox], minval=0, maxval=n_bbox, dtype=tf.int32) bbox_samples = tf.gather(points[:n_bbox], indices_bbox, axis=0) bbox_labels = tf.gather(labels[:n_bbox], indices_bbox, axis=0) sample_points.append(bbox_samples) sample_labels.append(bbox_labels) if sample_surf > 0: indices_surf = tf.random.uniform([sample_surf], minval=0, maxval=n_surf, dtype=tf.int32) surf_samples = tf.gather( points[n_bbox:n_bbox + n_surf], indices_surf, axis=0) surf_labels = tf.gather( labels[n_bbox:n_bbox + n_surf], indices_surf, axis=0) sample_points.append(surf_samples) sample_labels.append(surf_labels) points = tf.concat(sample_points, axis=0) point_labels = tf.concat(sample_labels, axis=0) feature['point'] = tf.expand_dims(points, axis=0) feature['label'] = tf.expand_dims(point_labels, axis=0) return feature def _sample_eval_points(fs): feature = {} feature['transform'] = fs['transform'] feature['points'] = fs['point'][:, :n_bbox] feature['labels'] = fs['label'][:, :n_bbox] feature['name'] = fs['name'] 
feature['vert'] = fs['vert'] feature['weight'] = fs['weight'] feature['joint'] = fs['joint'] return feature data_split = 'train' all_motions = list(x for x in range(10)) if split == 'train': file_pattern = [ path.join(data_dir, '{0}-{1:02d}-{2:02d}-*'.format(data_split, subject, x)) for x in all_motions if x != motion ] else: file_pattern = [ path.join(data_dir, '{0}-{1:02d}-{2:02d}-*'.format(data_split, subject, motion)) ] data_files = tf.gfile.Glob(file_pattern) if not data_files: raise IOError('{} did not match any files'.format(file_pattern)) filenames = tf.data.Dataset.list_files(file_pattern, shuffle=True) data = filenames.interleave( lambda x: tf.data.TFRecordDataset([x]), num_parallel_calls=tf.data.experimental.AUTOTUNE) data = data.map( _parse_tfrecord, num_parallel_calls=tf.data.experimental.AUTOTUNE).cache() if split == 'train': data = data.map( _sample_frame_points, num_parallel_calls=tf.data.experimental.AUTOTUNE) else: data = data.map( _sample_eval_points, num_parallel_calls=tf.data.experimental.AUTOTUNE) if split == 'train': data = data.shuffle(int(batch_size * 2.5)).repeat(-1) else: batch_size = 1 return data.batch( batch_size, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE) return _input_fn dataset_dict = { 'amass': amass, }
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Dataset implementations.""" from os import path import tensorflow.compat.v1 as tf tf.disable_eager_execution() def get_dataset(split, hparams): return dataset_dict[hparams.dataset](split, hparams) def amass(split, hparams): """Construct an AMASS data loader.""" def _input_fn(params): # pylint: disable=unused-argument # Dataset constants. n_bbox = 100000 n_surf = 100000 n_points = n_bbox + n_surf n_vert = 6890 n_frames = 1 # Parse parameters for global configurations. n_dims = hparams.n_dims data_dir = hparams.data_dir sample_bbox = hparams.sample_bbox sample_surf = hparams.sample_surf batch_size = hparams.batch_size subject = hparams.subject motion = hparams.motion n_parts = hparams.n_parts def _parse_tfrecord(serialized_example): fs = tf.parse_single_example( serialized_example, features={ 'point': tf.FixedLenFeature([n_frames * n_points * n_dims], tf.float32), 'label': tf.FixedLenFeature([n_frames * n_points * 1], tf.float32), 'vert': tf.FixedLenFeature([n_frames * n_vert * n_dims], tf.float32), 'weight': tf.FixedLenFeature([n_frames * n_vert * n_parts], tf.float32), 'transform': tf.FixedLenFeature( [n_frames * n_parts * (n_dims + 1) * (n_dims + 1)], tf.float32), 'joint': tf.FixedLenFeature([n_frames * n_parts * n_dims], tf.float32), 'name': tf.FixedLenFeature([], tf.string), }) fs['point'] = tf.reshape(fs['point'], [n_frames, n_points, n_dims]) fs['label'] = tf.reshape(fs['label'], [n_frames, n_points, 1]) fs['vert'] = tf.reshape(fs['vert'], [n_frames, n_vert, n_dims]) fs['weight'] = tf.reshape(fs['weight'], [n_frames, n_vert, n_parts]) fs['transform'] = tf.reshape(fs['transform'], [n_frames, n_parts, n_dims + 1, n_dims + 1]) fs['joint'] = tf.reshape(fs['joint'], [n_frames, n_parts, n_dims]) return fs def _sample_frame_points(fs): feature = {} for k, v in fs.items(): feature[k] = v points = feature['point'][0] labels = feature['label'][0] sample_points = [] sample_labels = [] if sample_bbox > 0: indices_bbox = tf.random.uniform([sample_bbox], minval=0, maxval=n_bbox, dtype=tf.int32) bbox_samples = tf.gather(points[:n_bbox], indices_bbox, axis=0) bbox_labels = tf.gather(labels[:n_bbox], indices_bbox, axis=0) sample_points.append(bbox_samples) sample_labels.append(bbox_labels) if sample_surf > 0: indices_surf = tf.random.uniform([sample_surf], minval=0, maxval=n_surf, dtype=tf.int32) surf_samples = tf.gather( points[n_bbox:n_bbox + n_surf], indices_surf, axis=0) surf_labels = tf.gather( labels[n_bbox:n_bbox + n_surf], indices_surf, axis=0) sample_points.append(surf_samples) sample_labels.append(surf_labels) points = tf.concat(sample_points, axis=0) point_labels = tf.concat(sample_labels, axis=0) feature['point'] = tf.expand_dims(points, axis=0) feature['label'] = tf.expand_dims(point_labels, axis=0) return feature def _sample_eval_points(fs): feature = {} feature['transform'] = fs['transform'] feature['points'] = fs['point'][:, :n_bbox] feature['labels'] = fs['label'][:, :n_bbox] feature['name'] = fs['name'] 
feature['vert'] = fs['vert'] feature['weight'] = fs['weight'] feature['joint'] = fs['joint'] return feature data_split = 'train' all_motions = list(x for x in range(10)) if split == 'train': file_pattern = [ path.join(data_dir, '{0}-{1:02d}-{2:02d}-*'.format(data_split, subject, x)) for x in all_motions if x != motion ] else: file_pattern = [ path.join(data_dir, '{0}-{1:02d}-{2:02d}-*'.format(data_split, subject, motion)) ] data_files = tf.gfile.Glob(file_pattern) if not data_files: raise IOError('{} did not match any files'.format(file_pattern)) filenames = tf.data.Dataset.list_files(file_pattern, shuffle=True) data = filenames.interleave( lambda x: tf.data.TFRecordDataset([x]), num_parallel_calls=tf.data.experimental.AUTOTUNE) data = data.map( _parse_tfrecord, num_parallel_calls=tf.data.experimental.AUTOTUNE).cache() if split == 'train': data = data.map( _sample_frame_points, num_parallel_calls=tf.data.experimental.AUTOTUNE) else: data = data.map( _sample_eval_points, num_parallel_calls=tf.data.experimental.AUTOTUNE) if split == 'train': data = data.shuffle(int(batch_size * 2.5)).repeat(-1) else: batch_size = 1 return data.batch( batch_size, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE) return _input_fn dataset_dict = { 'amass': amass, }
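A hedged sketch (not part of the original file) of wiring up the AMASS loader; the hparams values below are illustrative only, but every field shown is one this loader actually reads:

import argparse
from tensorflow_graphics.projects.nasa.lib import datasets

hparams = argparse.Namespace(
    dataset='amass', data_dir='/path/to/tfrecords', n_dims=3, n_parts=24,
    sample_bbox=1024, sample_surf=1024, batch_size=12, subject=0, motion=0)
input_fn = datasets.get_dataset('train', hparams)
dataset = input_fn(params=None)  # A batched tf.data.Dataset (graph mode; eager is disabled at import).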
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/projects/neural_voxel_renderer/models.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Definition of NVR+ keras model.""" import tensorflow.compat.v1 as tf import tensorflow_graphics.projects.neural_voxel_renderer.layers as layer_utils initializer = tf.keras.initializers.glorot_normal() layers = tf.keras.layers def unet_3x_with_res_in_mid(feat_in, out_filters, norm2d): """Helper function of a Unet with res blocks in the middle.""" e1 = layer_utils.residual_block_2d(feat_in, nfilters=128, strides=(2, 2), normalization=norm2d) # 16x128 e2 = layer_utils.residual_block_2d(e1, nfilters=256, strides=(2, 2), normalization=norm2d) # 8x256 e3 = layer_utils.residual_block_2d(e2, nfilters=512, strides=(2, 2), normalization=norm2d) # 4x512 mid1 = layer_utils.residual_block_2d(e3, nfilters=512, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d mid2 = layer_utils.residual_block_2d(mid1, nfilters=512, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d mid3 = layer_utils.residual_block_2d(mid2, nfilters=512, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d d0 = layer_utils.upconv(mid3, nfilters=256, size=4, strides=1) # 8x256 d1 = layers.concatenate([d0, e2]) # 8x512 d2 = layers.Conv2D(256, kernel_size=4, strides=(1, 1), padding='same', kernel_initializer=initializer)(d1) # 8x256 d3 = layer_utils.upconv(d2, nfilters=128, size=4, strides=1) # 16x128 d4 = layers.concatenate([d3, e1]) # 16x256 d5 = layers.Conv2D(128, kernel_size=4, strides=(1, 1), padding='same', kernel_initializer=initializer)(d4) # 8x256 d6 = layer_utils.upconv(d5, nfilters=64, size=4, strides=1) # 32x64 d7 = layers.concatenate([d6, feat_in]) # 32xN d8 = layers.Conv2D(out_filters, kernel_size=4, strides=(1, 1), padding='same', kernel_initializer=initializer)(d7) # 32xout return d8 def neural_voxel_renderer_plus(voxels, rerendering, light_pos, size=4, norm2d='batchnorm', norm3d='batchnorm'): """Neural Voxel Renderer + keras model.""" with tf.name_scope('Network/'): voxels = layers.Input(tensor=voxels) rerendering = layers.Input(tensor=rerendering) light_pos = layers.Input(tensor=light_pos) nf_2d = 512 with tf.name_scope('VoxelProcessing'): vol0_a = layer_utils.conv_block_3d(voxels, nfilters=16, size=size, strides=2, normalization=norm3d) # 64x64x64x16 vol0_b = layer_utils.conv_block_3d(vol0_a, nfilters=16, size=size, strides=1, normalization=norm3d) # 64x64x64x16 vol1_a = layer_utils.conv_block_3d(vol0_b, nfilters=16, size=size, strides=2, normalization=norm3d) # 32x32x32x16 vol1_b = layer_utils.conv_block_3d(vol1_a, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 vol1_c = layer_utils.conv_block_3d(vol1_b, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 shortcut = vol1_c vol_a1 = layer_utils.residual_block_3d(vol1_c, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a2 = layer_utils.residual_block_3d(vol_a1, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a3 = layer_utils.residual_block_3d(vol_a2, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a4 = 
layer_utils.residual_block_3d(vol_a3, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a5 = layer_utils.residual_block_3d(vol_a4, 32, strides=(1, 1, 1), normalization=norm3d) # 32x encoded_vol = layers.add([shortcut, vol_a5]) encoded_vol = layers.Reshape([32, 32, 32*32])(encoded_vol) encoded_vol = layers.Conv2D(nf_2d, kernel_size=1, strides=(1, 1), padding='same', kernel_initializer=initializer)(encoded_vol) latent_projection = layers.LeakyReLU()(encoded_vol) # 32x32x512 with tf.name_scope('ProjectionProcessing'): shortcut = latent_projection # 32x32xnf_2d e1 = layer_utils.residual_block_2d(latent_projection, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e2 = layer_utils.residual_block_2d(e1, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e3 = layer_utils.residual_block_2d(e2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e4 = layer_utils.residual_block_2d(e3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e5 = layer_utils.residual_block_2d(e4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d encoded_proj = layers.add([shortcut, e5]) # 32x32xnf_2d with tf.name_scope('LightProcessing'): fc_light = layers.Dense(64, kernel_initializer=initializer)(light_pos) light_code = layers.Dense(64, kernel_initializer=initializer)(fc_light) light_code = \ layers.Lambda(lambda v: tf.tile(v[0], [1, 32*32]))([light_code]) light_code = layers.Reshape((32, 32, 64))(light_code) # 32x32x64 with tf.name_scope('Merger'): latent_code_final = layers.concatenate([encoded_proj, light_code]) latent_code_final = layer_utils.conv_block_2d(latent_code_final, nfilters=nf_2d, size=size, strides=1, normalization=norm3d) shortcut = latent_code_final m1 = layer_utils.residual_block_2d(latent_code_final, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m2 = layer_utils.residual_block_2d(m1, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m3 = layer_utils.residual_block_2d(m2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m4 = layer_utils.residual_block_2d(m3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m5 = layer_utils.residual_block_2d(m4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d latent_code_final2 = layers.add([shortcut, m5]) # 32x32xnf_2d with tf.name_scope('Decoder'): d7 = layer_utils.conv_t_block_2d(latent_code_final2, nfilters=128, size=size, strides=2, normalization=norm2d) # 64x64x128 d7 = layer_utils.conv_block_2d(d7, nfilters=128, size=size, strides=1, normalization=norm2d) # 64x64x128 d8 = layer_utils.conv_t_block_2d(d7, nfilters=64, size=size, strides=2, normalization=norm2d) # 128x128x64 d8 = layer_utils.conv_block_2d(d8, nfilters=64, size=size, strides=1, normalization=norm2d) # 128x128x64 d9 = layer_utils.conv_t_block_2d(d8, nfilters=32, size=size, strides=2, normalization=norm2d) # 256x256x32 d9 = layer_utils.conv_block_2d(d9, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x256x32 rendered_image = layers.Conv2D(32, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(d9) # 256x256x3 with tf.name_scope('ImageProcessingNetwork'): ec1 = layer_utils.conv_block_2d(rerendering, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x ec2 = layer_utils.conv_block_2d(ec1, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x with tf.name_scope('NeuralRerenderingNetwork'): latent_img = layers.add([rendered_image, ec2]) target_code = 
unet_3x_with_res_in_mid(latent_img, 32, norm2d=norm2d) out0 = layer_utils.conv_block_2d(target_code, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x predicted_image = layers.Conv2D(3, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(out0) # 256x256x3 return tf.keras.Model(inputs=[voxels, rerendering, light_pos], outputs=[predicted_image]) def neural_voxel_renderer_plus_tf2(size=4, norm2d='batchnorm', norm3d='batchnorm'): """Neural Voxel Renderer + keras model for tf2.""" with tf.name_scope('Network/'): voxels = layers.Input(shape=[128, 128, 128, 4]) rerendering = layers.Input(shape=[256, 256, 3]) light_pos = layers.Input(shape=[3]) nf_2d = 512 with tf.name_scope('VoxelProcessing'): vol0_a = layer_utils.conv_block_3d(voxels, nfilters=16, size=size, strides=2, normalization=norm3d) # 64x64x64x16 vol0_b = layer_utils.conv_block_3d(vol0_a, nfilters=16, size=size, strides=1, normalization=norm3d) # 64x64x64x16 vol1_a = layer_utils.conv_block_3d(vol0_b, nfilters=16, size=size, strides=2, normalization=norm3d) # 32x32x32x16 vol1_b = layer_utils.conv_block_3d(vol1_a, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 vol1_c = layer_utils.conv_block_3d(vol1_b, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 shortcut = vol1_c vol_a1 = layer_utils.residual_block_3d(vol1_c, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a2 = layer_utils.residual_block_3d(vol_a1, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a3 = layer_utils.residual_block_3d(vol_a2, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a4 = layer_utils.residual_block_3d(vol_a3, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a5 = layer_utils.residual_block_3d(vol_a4, 32, strides=(1, 1, 1), normalization=norm3d) # 32x encoded_vol = layers.add([shortcut, vol_a5]) encoded_vol = layers.Reshape([32, 32, 32*32])(encoded_vol) encoded_vol = layers.Conv2D(nf_2d, kernel_size=1, strides=(1, 1), padding='same', kernel_initializer=initializer)(encoded_vol) latent_projection = layers.LeakyReLU()(encoded_vol) # 32x32x512 with tf.name_scope('ProjectionProcessing'): shortcut = latent_projection # 32x32xnf_2d e1 = layer_utils.residual_block_2d(latent_projection, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e2 = layer_utils.residual_block_2d(e1, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e3 = layer_utils.residual_block_2d(e2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e4 = layer_utils.residual_block_2d(e3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e5 = layer_utils.residual_block_2d(e4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d encoded_proj = layers.add([shortcut, e5]) # 32x32xnf_2d with tf.name_scope('LightProcessing'): fc_light = layers.Dense(64, kernel_initializer=initializer)(light_pos) light_code = layers.Dense(64, kernel_initializer=initializer)(fc_light) light_code = \ layers.Lambda(lambda v: tf.tile(v[0], [1, 32*32]))([light_code]) light_code = layers.Reshape((32, 32, 64))(light_code) # 32x32x64 with tf.name_scope('Merger'): latent_code_final = layers.concatenate([encoded_proj, light_code]) latent_code_final = layer_utils.conv_block_2d(latent_code_final, nfilters=nf_2d, size=size, strides=1, normalization=norm3d) shortcut = latent_code_final m1 = layer_utils.residual_block_2d(latent_code_final, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m2 = layer_utils.residual_block_2d(m1, 
nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m3 = layer_utils.residual_block_2d(m2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m4 = layer_utils.residual_block_2d(m3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m5 = layer_utils.residual_block_2d(m4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d latent_code_final2 = layers.add([shortcut, m5]) # 32x32xnf_2d with tf.name_scope('Decoder'): d7 = layer_utils.conv_t_block_2d(latent_code_final2, nfilters=128, size=size, strides=2, normalization=norm2d) # 64x64x128 d7 = layer_utils.conv_block_2d(d7, nfilters=128, size=size, strides=1, normalization=norm2d) # 64x64x128 d8 = layer_utils.conv_t_block_2d(d7, nfilters=64, size=size, strides=2, normalization=norm2d) # 128x128x64 d8 = layer_utils.conv_block_2d(d8, nfilters=64, size=size, strides=1, normalization=norm2d) # 128x128x64 d9 = layer_utils.conv_t_block_2d(d8, nfilters=32, size=size, strides=2, normalization=norm2d) # 256x256x32 d9 = layer_utils.conv_block_2d(d9, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x256x32 rendered_image = layers.Conv2D(32, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(d9) # 256x256x3 with tf.name_scope('ImageProcessingNetwork'): ec1 = layer_utils.conv_block_2d(rerendering, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x ec2 = layer_utils.conv_block_2d(ec1, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x with tf.name_scope('NeuralRerenderingNetwork'): latent_img = layers.add([rendered_image, ec2]) target_code = unet_3x_with_res_in_mid(latent_img, 32, norm2d=norm2d) out0 = layer_utils.conv_block_2d(target_code, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x predicted_image = layers.Conv2D(3, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(out0) # 256x256x3 return tf.keras.Model(inputs=[voxels, rerendering, light_pos], outputs=[predicted_image])
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Definition of NVR+ keras model.""" import tensorflow.compat.v1 as tf import tensorflow_graphics.projects.neural_voxel_renderer.layers as layer_utils initializer = tf.keras.initializers.glorot_normal() layers = tf.keras.layers def unet_3x_with_res_in_mid(feat_in, out_filters, norm2d): """Helper function of a Unet with res blocks in the middle.""" e1 = layer_utils.residual_block_2d(feat_in, nfilters=128, strides=(2, 2), normalization=norm2d) # 16x128 e2 = layer_utils.residual_block_2d(e1, nfilters=256, strides=(2, 2), normalization=norm2d) # 8x256 e3 = layer_utils.residual_block_2d(e2, nfilters=512, strides=(2, 2), normalization=norm2d) # 4x512 mid1 = layer_utils.residual_block_2d(e3, nfilters=512, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d mid2 = layer_utils.residual_block_2d(mid1, nfilters=512, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d mid3 = layer_utils.residual_block_2d(mid2, nfilters=512, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d d0 = layer_utils.upconv(mid3, nfilters=256, size=4, strides=1) # 8x256 d1 = layers.concatenate([d0, e2]) # 8x512 d2 = layers.Conv2D(256, kernel_size=4, strides=(1, 1), padding='same', kernel_initializer=initializer)(d1) # 8x256 d3 = layer_utils.upconv(d2, nfilters=128, size=4, strides=1) # 16x128 d4 = layers.concatenate([d3, e1]) # 16x256 d5 = layers.Conv2D(128, kernel_size=4, strides=(1, 1), padding='same', kernel_initializer=initializer)(d4) # 8x256 d6 = layer_utils.upconv(d5, nfilters=64, size=4, strides=1) # 32x64 d7 = layers.concatenate([d6, feat_in]) # 32xN d8 = layers.Conv2D(out_filters, kernel_size=4, strides=(1, 1), padding='same', kernel_initializer=initializer)(d7) # 32xout return d8 def neural_voxel_renderer_plus(voxels, rerendering, light_pos, size=4, norm2d='batchnorm', norm3d='batchnorm'): """Neural Voxel Renderer + keras model.""" with tf.name_scope('Network/'): voxels = layers.Input(tensor=voxels) rerendering = layers.Input(tensor=rerendering) light_pos = layers.Input(tensor=light_pos) nf_2d = 512 with tf.name_scope('VoxelProcessing'): vol0_a = layer_utils.conv_block_3d(voxels, nfilters=16, size=size, strides=2, normalization=norm3d) # 64x64x64x16 vol0_b = layer_utils.conv_block_3d(vol0_a, nfilters=16, size=size, strides=1, normalization=norm3d) # 64x64x64x16 vol1_a = layer_utils.conv_block_3d(vol0_b, nfilters=16, size=size, strides=2, normalization=norm3d) # 32x32x32x16 vol1_b = layer_utils.conv_block_3d(vol1_a, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 vol1_c = layer_utils.conv_block_3d(vol1_b, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 shortcut = vol1_c vol_a1 = layer_utils.residual_block_3d(vol1_c, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a2 = layer_utils.residual_block_3d(vol_a1, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a3 = layer_utils.residual_block_3d(vol_a2, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a4 = 
layer_utils.residual_block_3d(vol_a3, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a5 = layer_utils.residual_block_3d(vol_a4, 32, strides=(1, 1, 1), normalization=norm3d) # 32x encoded_vol = layers.add([shortcut, vol_a5]) encoded_vol = layers.Reshape([32, 32, 32*32])(encoded_vol) encoded_vol = layers.Conv2D(nf_2d, kernel_size=1, strides=(1, 1), padding='same', kernel_initializer=initializer)(encoded_vol) latent_projection = layers.LeakyReLU()(encoded_vol) # 32x32x512 with tf.name_scope('ProjectionProcessing'): shortcut = latent_projection # 32x32xnf_2d e1 = layer_utils.residual_block_2d(latent_projection, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e2 = layer_utils.residual_block_2d(e1, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e3 = layer_utils.residual_block_2d(e2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e4 = layer_utils.residual_block_2d(e3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e5 = layer_utils.residual_block_2d(e4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d encoded_proj = layers.add([shortcut, e5]) # 32x32xnf_2d with tf.name_scope('LightProcessing'): fc_light = layers.Dense(64, kernel_initializer=initializer)(light_pos) light_code = layers.Dense(64, kernel_initializer=initializer)(fc_light) light_code = \ layers.Lambda(lambda v: tf.tile(v[0], [1, 32*32]))([light_code]) light_code = layers.Reshape((32, 32, 64))(light_code) # 32x32x64 with tf.name_scope('Merger'): latent_code_final = layers.concatenate([encoded_proj, light_code]) latent_code_final = layer_utils.conv_block_2d(latent_code_final, nfilters=nf_2d, size=size, strides=1, normalization=norm3d) shortcut = latent_code_final m1 = layer_utils.residual_block_2d(latent_code_final, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m2 = layer_utils.residual_block_2d(m1, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m3 = layer_utils.residual_block_2d(m2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m4 = layer_utils.residual_block_2d(m3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m5 = layer_utils.residual_block_2d(m4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d latent_code_final2 = layers.add([shortcut, m5]) # 32x32xnf_2d with tf.name_scope('Decoder'): d7 = layer_utils.conv_t_block_2d(latent_code_final2, nfilters=128, size=size, strides=2, normalization=norm2d) # 64x64x128 d7 = layer_utils.conv_block_2d(d7, nfilters=128, size=size, strides=1, normalization=norm2d) # 64x64x128 d8 = layer_utils.conv_t_block_2d(d7, nfilters=64, size=size, strides=2, normalization=norm2d) # 128x128x64 d8 = layer_utils.conv_block_2d(d8, nfilters=64, size=size, strides=1, normalization=norm2d) # 128x128x64 d9 = layer_utils.conv_t_block_2d(d8, nfilters=32, size=size, strides=2, normalization=norm2d) # 256x256x32 d9 = layer_utils.conv_block_2d(d9, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x256x32 rendered_image = layers.Conv2D(32, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(d9) # 256x256x3 with tf.name_scope('ImageProcessingNetwork'): ec1 = layer_utils.conv_block_2d(rerendering, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x ec2 = layer_utils.conv_block_2d(ec1, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x with tf.name_scope('NeuralRerenderingNetwork'): latent_img = layers.add([rendered_image, ec2]) target_code = 
unet_3x_with_res_in_mid(latent_img, 32, norm2d=norm2d) out0 = layer_utils.conv_block_2d(target_code, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x predicted_image = layers.Conv2D(3, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(out0) # 256x256x3 return tf.keras.Model(inputs=[voxels, rerendering, light_pos], outputs=[predicted_image]) def neural_voxel_renderer_plus_tf2(size=4, norm2d='batchnorm', norm3d='batchnorm'): """Neural Voxel Renderer + keras model for tf2.""" with tf.name_scope('Network/'): voxels = layers.Input(shape=[128, 128, 128, 4]) rerendering = layers.Input(shape=[256, 256, 3]) light_pos = layers.Input(shape=[3]) nf_2d = 512 with tf.name_scope('VoxelProcessing'): vol0_a = layer_utils.conv_block_3d(voxels, nfilters=16, size=size, strides=2, normalization=norm3d) # 64x64x64x16 vol0_b = layer_utils.conv_block_3d(vol0_a, nfilters=16, size=size, strides=1, normalization=norm3d) # 64x64x64x16 vol1_a = layer_utils.conv_block_3d(vol0_b, nfilters=16, size=size, strides=2, normalization=norm3d) # 32x32x32x16 vol1_b = layer_utils.conv_block_3d(vol1_a, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 vol1_c = layer_utils.conv_block_3d(vol1_b, nfilters=32, size=size, strides=1, normalization=norm3d) # 32x32x32x32 shortcut = vol1_c vol_a1 = layer_utils.residual_block_3d(vol1_c, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a2 = layer_utils.residual_block_3d(vol_a1, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a3 = layer_utils.residual_block_3d(vol_a2, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a4 = layer_utils.residual_block_3d(vol_a3, 32, strides=(1, 1, 1), normalization=norm3d) # 32x vol_a5 = layer_utils.residual_block_3d(vol_a4, 32, strides=(1, 1, 1), normalization=norm3d) # 32x encoded_vol = layers.add([shortcut, vol_a5]) encoded_vol = layers.Reshape([32, 32, 32*32])(encoded_vol) encoded_vol = layers.Conv2D(nf_2d, kernel_size=1, strides=(1, 1), padding='same', kernel_initializer=initializer)(encoded_vol) latent_projection = layers.LeakyReLU()(encoded_vol) # 32x32x512 with tf.name_scope('ProjectionProcessing'): shortcut = latent_projection # 32x32xnf_2d e1 = layer_utils.residual_block_2d(latent_projection, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e2 = layer_utils.residual_block_2d(e1, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e3 = layer_utils.residual_block_2d(e2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e4 = layer_utils.residual_block_2d(e3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d e5 = layer_utils.residual_block_2d(e4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d encoded_proj = layers.add([shortcut, e5]) # 32x32xnf_2d with tf.name_scope('LightProcessing'): fc_light = layers.Dense(64, kernel_initializer=initializer)(light_pos) light_code = layers.Dense(64, kernel_initializer=initializer)(fc_light) light_code = \ layers.Lambda(lambda v: tf.tile(v[0], [1, 32*32]))([light_code]) light_code = layers.Reshape((32, 32, 64))(light_code) # 32x32x64 with tf.name_scope('Merger'): latent_code_final = layers.concatenate([encoded_proj, light_code]) latent_code_final = layer_utils.conv_block_2d(latent_code_final, nfilters=nf_2d, size=size, strides=1, normalization=norm3d) shortcut = latent_code_final m1 = layer_utils.residual_block_2d(latent_code_final, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m2 = layer_utils.residual_block_2d(m1, 
nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m3 = layer_utils.residual_block_2d(m2, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m4 = layer_utils.residual_block_2d(m3, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d m5 = layer_utils.residual_block_2d(m4, nfilters=nf_2d, strides=(1, 1), normalization=norm2d) # 32x32xnf_2d latent_code_final2 = layers.add([shortcut, m5]) # 32x32xnf_2d with tf.name_scope('Decoder'): d7 = layer_utils.conv_t_block_2d(latent_code_final2, nfilters=128, size=size, strides=2, normalization=norm2d) # 64x64x128 d7 = layer_utils.conv_block_2d(d7, nfilters=128, size=size, strides=1, normalization=norm2d) # 64x64x128 d8 = layer_utils.conv_t_block_2d(d7, nfilters=64, size=size, strides=2, normalization=norm2d) # 128x128x64 d8 = layer_utils.conv_block_2d(d8, nfilters=64, size=size, strides=1, normalization=norm2d) # 128x128x64 d9 = layer_utils.conv_t_block_2d(d8, nfilters=32, size=size, strides=2, normalization=norm2d) # 256x256x32 d9 = layer_utils.conv_block_2d(d9, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x256x32 rendered_image = layers.Conv2D(32, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(d9) # 256x256x3 with tf.name_scope('ImageProcessingNetwork'): ec1 = layer_utils.conv_block_2d(rerendering, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x ec2 = layer_utils.conv_block_2d(ec1, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x with tf.name_scope('NeuralRerenderingNetwork'): latent_img = layers.add([rendered_image, ec2]) target_code = unet_3x_with_res_in_mid(latent_img, 32, norm2d=norm2d) out0 = layer_utils.conv_block_2d(target_code, nfilters=32, size=size, strides=1, normalization=norm2d) # 256x predicted_image = layers.Conv2D(3, size, strides=1, padding='same', kernel_initializer=initializer, use_bias=False)(out0) # 256x256x3 return tf.keras.Model(inputs=[voxels, rerendering, light_pos], outputs=[predicted_image])
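A hedged sketch (not part of the original file) of a forward pass through the tf2 variant; the input shapes and the call pattern are taken from the Input layers declared above, and the random inputs are placeholders for real data:

import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.neural_voxel_renderer import models

model = models.neural_voxel_renderer_plus_tf2()
voxels = tf.random.uniform([1, 128, 128, 128, 4])
rerendering = tf.random.uniform([1, 256, 256, 3])
light_pos = tf.random.uniform([1, 3])
predicted = model([voxels, rerendering, light_pos])  # Shape [1, 256, 256, 3].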
-1
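For quick context, a hedged usage sketch for the neural_voxel_renderer_plus_tf2 model assembled in the row above. It assumes the function and its module-level dependencies (layers, layer_utils, initializer, unet_3x_with_res_in_mid) are in scope; the batch size of 1 is arbitrary, not part of the dataset row.

# Hedged sketch, not part of the dataset row: builds NVR+ with its default
# arguments and runs one forward pass on random inputs whose shapes match
# the Input layers declared above.
import numpy as np

model = neural_voxel_renderer_plus_tf2()  # assumes the definition above is in scope
voxels = np.random.uniform(size=(1, 128, 128, 128, 4)).astype(np.float32)
rerendering = np.random.uniform(size=(1, 256, 256, 3)).astype(np.float32)
light_pos = np.random.uniform(size=(1, 3)).astype(np.float32)
predicted_image = model([voxels, rerendering, light_pos])  # shape (1, 256, 256, 3)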
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/image/tests/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/image/pyramid.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements image pyramid functionalities. More details about image pyramids can be found on [this page.] (https://en.wikipedia.org/wiki/Pyramid_(image_processing)) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def _downsample(image, kernel): """Downsamples the image using a convolution with stride 2. Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. kernel: A tensor of shape `[H_k, W_k, C, C]`, where `H_k` and `W_k` are the height and width of the kernel. Returns: A tensor of shape `[B, H_d, W_d, C]`, where `H_d` and `W_d` are the height and width of the downsampled image. """ return tf.nn.conv2d( input=image, filters=kernel, strides=[1, 2, 2, 1], padding="SAME") def _binomial_kernel(num_channels, dtype=tf.float32): """Creates a 5x5 binomial kernel. Args: num_channels: The number of channels of the image to filter. dtype: The type of an element in the kernel. Returns: A tensor of shape `[5, 5, num_channels, num_channels]`. """ kernel = np.array((1., 4., 6., 4., 1.), dtype=dtype.as_numpy_dtype) kernel = np.outer(kernel, kernel) kernel /= np.sum(kernel) kernel = kernel[:, :, np.newaxis, np.newaxis] return tf.constant(kernel, dtype=dtype) * tf.eye(num_channels, dtype=dtype) def _build_pyramid(image, sampler, num_levels): """Creates the different levels of the pyramid. Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. sampler: A function to execute for each level (_upsample or _downsample). num_levels: The number of levels. Returns: A list containing `num_levels` tensors of shape `[B, H_i, W_i, C]`, where `H_i` and `W_i` are the height and width of the image for the level i. """ kernel = _binomial_kernel(tf.shape(input=image)[3], dtype=image.dtype) levels = [image] for _ in range(num_levels): image = sampler(image, kernel) levels.append(image) return levels def _split(image, kernel): """Splits the image into high and low frequencies. This is achieved by smoothing the input image and subtracting the smoothed version from the input. Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. kernel: A tensor of shape `[H_k, W_k, C, C]`, where `H_k` and `W_k` are the height and width of the kernel. Returns: A tuple of two tensors of shape `[B, H, W, C]` and `[B, H_d, W_d, C]`, where the first one contains the high frequencies of the image and the second one the low frequencies. 
`H_d` and `W_d` are the height and width of the downsampled low frequency image. """ low = _downsample(image, kernel) high = image - _upsample(low, kernel, tf.shape(input=image)) return high, low def _upsample(image, kernel, output_shape=None): """Upsamples the image using a transposed convolution with stride 2. Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. kernel: A tensor of shape `[H_k, W_k, C, C]`, where `H_k` and `W_k` are the height and width of the kernel. output_shape: The output shape. Returns: A tensor of shape `[B, H_u, W_u, C]`, where `H_u` and `W_u` are the height and width of the upsampled image. """ if output_shape is None: output_shape = tf.shape(input=image) output_shape = (output_shape[0], output_shape[1] * 2, output_shape[2] * 2, output_shape[3]) return tf.nn.conv2d_transpose( image, kernel * 4.0, output_shape=output_shape, strides=[1, 2, 2, 1], padding="SAME") def downsample(image, num_levels, name=None): """Generates the different levels of the pyramid (downsampling). Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. num_levels: The number of levels to generate. name: A name for this op that defaults to "pyramid_downsample". Returns: A list containing `num_levels` tensors of shape `[B, H_i, W_i, C]`, where `H_i` and `W_i` are the height and width of the downsampled image for the level i. Raises: ValueError: If the shape of `image` is not supported. """ with tf.compat.v1.name_scope(name, "pyramid_downsample", [image]): image = tf.convert_to_tensor(value=image) shape.check_static(tensor=image, tensor_name="image", has_rank=4) return _build_pyramid(image, _downsample, num_levels) def merge(levels, name=None): """Merges the different levels of the pyramid back to an image. Args: levels: A list containing tensors of shape `[B, H_i, W_i, C]`, where `B` is the batch size, H_i and W_i are the height and width of the image for the level i, and `C` the number of channels of the image. name: A name for this op that defaults to "pyramid_merge". Returns: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. Raises: ValueError: If the shape of the elements of `levels` is not supported. """ with tf.compat.v1.name_scope(name, "pyramid_merge", levels): levels = [tf.convert_to_tensor(value=level) for level in levels] for index, level in enumerate(levels): shape.check_static( tensor=level, tensor_name="level {}".format(index), has_rank=4) image = levels[-1] kernel = _binomial_kernel(tf.shape(input=image)[3], dtype=image.dtype) for level in reversed(levels[:-1]): image = _upsample(image, kernel, tf.shape(input=level)) + level return image def split(image, num_levels, name=None): """Generates the different levels of the pyramid. Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. num_levels: The number of levels to generate. name: A name for this op that defaults to "pyramid_split". Returns: A list containing `num_levels` tensors of shape `[B, H_i, W_i, C]`, where `H_i` and `W_i` are the height and width of the image for the level i. Raises: ValueError: If the shape of `image` is not supported. 
""" with tf.compat.v1.name_scope(name, "pyramid_split", [image]): image = tf.convert_to_tensor(value=image) shape.check_static(tensor=image, tensor_name="image", has_rank=4) kernel = _binomial_kernel(tf.shape(input=image)[3], dtype=image.dtype) low = image levels = [] for _ in range(num_levels): high, low = _split(low, kernel) levels.append(high) levels.append(low) return levels def upsample(image, num_levels, name=None): """Generates the different levels of the pyramid (upsampling). Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. num_levels: The number of levels to generate. name: A name for this op that defaults to "pyramid_upsample". Returns: A list containing `num_levels` tensors of shape `[B, H_i, W_i, C]`, where `H_i` and `W_i` are the height and width of the upsampled image for the level i. Raises: ValueError: If the shape of `image` is not supported. """ with tf.compat.v1.name_scope(name, "pyramid_upsample", [image]): image = tf.convert_to_tensor(value=image) shape.check_static(tensor=image, tensor_name="image", has_rank=4) return _build_pyramid(image, _upsample, num_levels) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements image pyramid functionalities. More details about image pyramids can be found on [this page.] (https://en.wikipedia.org/wiki/Pyramid_(image_processing)) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def _downsample(image, kernel): """Downsamples the image using a convolution with stride 2. Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. kernel: A tensor of shape `[H_k, W_k, C, C]`, where `H_k` and `W_k` are the height and width of the kernel. Returns: A tensor of shape `[B, H_d, W_d, C]`, where `H_d` and `W_d` are the height and width of the downsampled image. """ return tf.nn.conv2d( input=image, filters=kernel, strides=[1, 2, 2, 1], padding="SAME") def _binomial_kernel(num_channels, dtype=tf.float32): """Creates a 5x5 binomial kernel. Args: num_channels: The number of channels of the image to filter. dtype: The type of an element in the kernel. Returns: A tensor of shape `[5, 5, num_channels, num_channels]`. """ kernel = np.array((1., 4., 6., 4., 1.), dtype=dtype.as_numpy_dtype) kernel = np.outer(kernel, kernel) kernel /= np.sum(kernel) kernel = kernel[:, :, np.newaxis, np.newaxis] return tf.constant(kernel, dtype=dtype) * tf.eye(num_channels, dtype=dtype) def _build_pyramid(image, sampler, num_levels): """Creates the different levels of the pyramid. Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. sampler: A function to execute for each level (_upsample or _downsample). num_levels: The number of levels. Returns: A list containing `num_levels` tensors of shape `[B, H_i, W_i, C]`, where `H_i` and `W_i` are the height and width of the image for the level i. """ kernel = _binomial_kernel(tf.shape(input=image)[3], dtype=image.dtype) levels = [image] for _ in range(num_levels): image = sampler(image, kernel) levels.append(image) return levels def _split(image, kernel): """Splits the image into high and low frequencies. This is achieved by smoothing the input image and subtracting the smoothed version from the input. Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. kernel: A tensor of shape `[H_k, W_k, C, C]`, where `H_k` and `W_k` are the height and width of the kernel. Returns: A tuple of two tensors of shape `[B, H, W, C]` and `[B, H_d, W_d, C]`, where the first one contains the high frequencies of the image and the second one the low frequencies. 
`H_d` and `W_d` are the height and width of the downsampled low frequency image. """ low = _downsample(image, kernel) high = image - _upsample(low, kernel, tf.shape(input=image)) return high, low def _upsample(image, kernel, output_shape=None): """Upsamples the image using a transposed convolution with stride 2. Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. kernel: A tensor of shape `[H_k, W_k, C, C]`, where `H_k` and `W_k` are the height and width of the kernel. output_shape: The output shape. Returns: A tensor of shape `[B, H_u, W_u, C]`, where `H_u` and `W_u` are the height and width of the upsampled image. """ if output_shape is None: output_shape = tf.shape(input=image) output_shape = (output_shape[0], output_shape[1] * 2, output_shape[2] * 2, output_shape[3]) return tf.nn.conv2d_transpose( image, kernel * 4.0, output_shape=output_shape, strides=[1, 2, 2, 1], padding="SAME") def downsample(image, num_levels, name=None): """Generates the different levels of the pyramid (downsampling). Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. num_levels: The number of levels to generate. name: A name for this op that defaults to "pyramid_downsample". Returns: A list containing `num_levels` tensors of shape `[B, H_i, W_i, C]`, where `H_i` and `W_i` are the height and width of the downsampled image for the level i. Raises: ValueError: If the shape of `image` is not supported. """ with tf.compat.v1.name_scope(name, "pyramid_downsample", [image]): image = tf.convert_to_tensor(value=image) shape.check_static(tensor=image, tensor_name="image", has_rank=4) return _build_pyramid(image, _downsample, num_levels) def merge(levels, name=None): """Merges the different levels of the pyramid back to an image. Args: levels: A list containing tensors of shape `[B, H_i, W_i, C]`, where `B` is the batch size, H_i and W_i are the height and width of the image for the level i, and `C` the number of channels of the image. name: A name for this op that defaults to "pyramid_merge". Returns: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. Raises: ValueError: If the shape of the elements of `levels` is not supported. """ with tf.compat.v1.name_scope(name, "pyramid_merge", levels): levels = [tf.convert_to_tensor(value=level) for level in levels] for index, level in enumerate(levels): shape.check_static( tensor=level, tensor_name="level {}".format(index), has_rank=4) image = levels[-1] kernel = _binomial_kernel(tf.shape(input=image)[3], dtype=image.dtype) for level in reversed(levels[:-1]): image = _upsample(image, kernel, tf.shape(input=level)) + level return image def split(image, num_levels, name=None): """Generates the different levels of the pyramid. Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. num_levels: The number of levels to generate. name: A name for this op that defaults to "pyramid_split". Returns: A list containing `num_levels` tensors of shape `[B, H_i, W_i, C]`, where `H_i` and `W_i` are the height and width of the image for the level i. Raises: ValueError: If the shape of `image` is not supported. 
""" with tf.compat.v1.name_scope(name, "pyramid_split", [image]): image = tf.convert_to_tensor(value=image) shape.check_static(tensor=image, tensor_name="image", has_rank=4) kernel = _binomial_kernel(tf.shape(input=image)[3], dtype=image.dtype) low = image levels = [] for _ in range(num_levels): high, low = _split(low, kernel) levels.append(high) levels.append(low) return levels def upsample(image, num_levels, name=None): """Generates the different levels of the pyramid (upsampling). Args: image: A tensor of shape `[B, H, W, C]`, where `B` is the batch size, `H` the height of the image, `W` the width of the image, and `C` the number of channels of the image. num_levels: The number of levels to generate. name: A name for this op that defaults to "pyramid_upsample". Returns: A list containing `num_levels` tensors of shape `[B, H_i, W_i, C]`, where `H_i` and `W_i` are the height and width of the upsampled image for the level i. Raises: ValueError: If the shape of `image` is not supported. """ with tf.compat.v1.name_scope(name, "pyramid_upsample", [image]): image = tf.convert_to_tensor(value=image) shape.check_static(tensor=image, tensor_name="image", has_rank=4) return _build_pyramid(image, _upsample, num_levels) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
-1
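As an illustration of the pyramid API in the row above, a minimal round-trip sketch: split returns num_levels high-frequency images followed by one low-frequency residual, and merge reconstructs the input up to numerical error. The import path follows the filepath field of the row; the image shape is an arbitrary assumption.

# Minimal sketch of the Laplacian split/merge round trip.
import numpy as np
import tensorflow as tf
from tensorflow_graphics.image import pyramid

image = tf.constant(np.random.uniform(size=(1, 16, 16, 3)), dtype=tf.float32)
levels = pyramid.split(image, num_levels=2)  # [high_0, high_1, low]
reconstructed = pyramid.merge(levels)        # approximately equal to `image`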
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/geometry/representation/triangle.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tensorflow triangle utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def normal(v0, v1, v2, clockwise=False, normalize=True, name=None): """Computes face normals (triangles). Note: In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: v0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the first vertex of a triangle. v1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the second vertex of a triangle. v2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the third vertex of a triangle. clockwise: Winding order to determine front-facing triangles. normalize: A `bool` indicating whether output normals should be normalized by the function. name: A name for this op. Defaults to "triangle_normal". Returns: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized vector. Raises: ValueError: If the shape of `v0`, `v1`, or `v2` is not supported. """ with tf.compat.v1.name_scope(name, "triangle_normal", [v0, v1, v2]): v0 = tf.convert_to_tensor(value=v0) v1 = tf.convert_to_tensor(value=v1) v2 = tf.convert_to_tensor(value=v2) shape.check_static(tensor=v0, tensor_name="v0", has_dim_equals=(-1, 3)) shape.check_static(tensor=v1, tensor_name="v1", has_dim_equals=(-1, 3)) shape.check_static(tensor=v2, tensor_name="v2", has_dim_equals=(-1, 3)) shape.compare_batch_dimensions( tensors=(v0, v1, v2), last_axes=-2, broadcast_compatible=True) normal_vector = vector.cross(v1 - v0, v2 - v0, axis=-1) normal_vector = asserts.assert_nonzero_norm(normal_vector) if not clockwise: normal_vector *= -1.0 if normalize: return tf.nn.l2_normalize(normal_vector, axis=-1) return normal_vector def area(v0, v1, v2, name=None): """Computes triangle areas. Note: Computed triangle area = 0.5 * | e1 x e2 | where e1 and e2 are edges of the triangle. A degenerate triangle will return 0 area, whereas the normal for a degenerate triangle is not defined. In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: v0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the first vertex of a triangle. v1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the second vertex of a triangle. v2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the third vertex of a triangle. name: A name for this op. Defaults to "triangle_area". Returns: A tensor of shape `[A1, ..., An, 1]`, where the last dimension represents the triangle area. 
""" with tf.compat.v1.name_scope(name, "triangle_area", [v0, v1, v2]): v0 = tf.convert_to_tensor(value=v0) v1 = tf.convert_to_tensor(value=v1) v2 = tf.convert_to_tensor(value=v2) shape.check_static(tensor=v0, tensor_name="v0", has_dim_equals=(-1, 3)) shape.check_static(tensor=v1, tensor_name="v1", has_dim_equals=(-1, 3)) shape.check_static(tensor=v2, tensor_name="v2", has_dim_equals=(-1, 3)) shape.compare_batch_dimensions( tensors=(v0, v1, v2), last_axes=-2, broadcast_compatible=True) normals = vector.cross(v1 - v0, v2 - v0, axis=-1) return 0.5 * tf.linalg.norm(tensor=normals, axis=-1, keepdims=True) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tensorflow triangle utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def normal(v0, v1, v2, clockwise=False, normalize=True, name=None): """Computes face normals (triangles). Note: In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: v0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the first vertex of a triangle. v1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the second vertex of a triangle. v2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the third vertex of a triangle. clockwise: Winding order to determine front-facing triangles. normalize: A `bool` indicating whether output normals should be normalized by the function. name: A name for this op. Defaults to "triangle_normal". Returns: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized vector. Raises: ValueError: If the shape of `v0`, `v1`, or `v2` is not supported. """ with tf.compat.v1.name_scope(name, "triangle_normal", [v0, v1, v2]): v0 = tf.convert_to_tensor(value=v0) v1 = tf.convert_to_tensor(value=v1) v2 = tf.convert_to_tensor(value=v2) shape.check_static(tensor=v0, tensor_name="v0", has_dim_equals=(-1, 3)) shape.check_static(tensor=v1, tensor_name="v1", has_dim_equals=(-1, 3)) shape.check_static(tensor=v2, tensor_name="v2", has_dim_equals=(-1, 3)) shape.compare_batch_dimensions( tensors=(v0, v1, v2), last_axes=-2, broadcast_compatible=True) normal_vector = vector.cross(v1 - v0, v2 - v0, axis=-1) normal_vector = asserts.assert_nonzero_norm(normal_vector) if not clockwise: normal_vector *= -1.0 if normalize: return tf.nn.l2_normalize(normal_vector, axis=-1) return normal_vector def area(v0, v1, v2, name=None): """Computes triangle areas. Note: Computed triangle area = 0.5 * | e1 x e2 | where e1 and e2 are edges of the triangle. A degenerate triangle will return 0 area, whereas the normal for a degenerate triangle is not defined. In the following, A1 to An are optional batch dimensions, which must be broadcast compatible. Args: v0: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the first vertex of a triangle. v1: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the second vertex of a triangle. v2: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the third vertex of a triangle. name: A name for this op. Defaults to "triangle_area". Returns: A tensor of shape `[A1, ..., An, 1]`, where the last dimension represents the triangle area. 
""" with tf.compat.v1.name_scope(name, "triangle_area", [v0, v1, v2]): v0 = tf.convert_to_tensor(value=v0) v1 = tf.convert_to_tensor(value=v1) v2 = tf.convert_to_tensor(value=v2) shape.check_static(tensor=v0, tensor_name="v0", has_dim_equals=(-1, 3)) shape.check_static(tensor=v1, tensor_name="v1", has_dim_equals=(-1, 3)) shape.check_static(tensor=v2, tensor_name="v2", has_dim_equals=(-1, 3)) shape.compare_batch_dimensions( tensors=(v0, v1, v2), last_axes=-2, broadcast_compatible=True) normals = vector.cross(v1 - v0, v2 - v0, axis=-1) return 0.5 * tf.linalg.norm(tensor=normals, axis=-1, keepdims=True) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
-1
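A small worked example for the triangle utilities in the row above: for a unit right triangle in the z=0 plane, the cross product of the edges is (0, 0, 1), so with the default counter-clockwise winding the returned normal is the negated, normalized cross product, and the area is 0.5. The import path follows the filepath field of the row; the batched shape `[1, 3]` is an assumption for illustration.

# Minimal sketch exercising triangle.normal and triangle.area.
import tensorflow as tf
from tensorflow_graphics.geometry.representation import triangle

v0 = tf.constant([[0.0, 0.0, 0.0]])
v1 = tf.constant([[1.0, 0.0, 0.0]])
v2 = tf.constant([[0.0, 1.0, 0.0]])
n = triangle.normal(v0, v1, v2)  # [[0., 0., -1.]] with the default clockwise=False
a = triangle.area(v0, v1, v2)    # [[0.5]]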
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/geometry/convolution/tests/graph_pooling_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tensorflow_graphics.geometry.convolution.tests.graph_pooling.""" # pylint: disable=protected-access import itertools from absl.testing import parameterized import numpy as np import tensorflow as tf import tensorflow_graphics.geometry.convolution.graph_pooling as gp from tensorflow_graphics.geometry.convolution.tests import utils_test from tensorflow_graphics.util import test_case def _dense_to_sparse(data): """Convert a numpy array to a tf.SparseTensor.""" return utils_test._dense_to_sparse(data) def _batch_sparse_eye(batch_shape, num_vertices, dtype): """Generate a batch of identity matrices.""" eye = np.eye(num_vertices, dtype=dtype) num_batch_dims = len(batch_shape) expand_shape = np.concatenate((np.ones( num_batch_dims, dtype=np.int32), (num_vertices, num_vertices)), axis=0) eye = np.reshape(eye, expand_shape) tile_shape = np.concatenate((batch_shape, (1, 1)), axis=0) return _dense_to_sparse(np.tile(eye, tile_shape)) class GraphPoolingTestPoolTests(test_case.TestCase): @parameterized.parameters( ("'sizes' must have an integer type.", np.float32, np.float32, np.float32), ("'data' must have a float type.", np.int32, np.float32, np.int32), ("'pool_map' and 'data' must have the same type.", np.float32, np.float64, np.int32)) def test_pool_exception_raised_types(self, err_msg, data_type, pool_map_type, sizes_type): """Tests the correct exceptions are raised for invalid types.""" data = np.ones((2, 3, 3), dtype=data_type) pool_map = _dense_to_sparse(np.ones((2, 3, 3), dtype=pool_map_type)) sizes = np.array(((1, 2), (2, 3)), dtype=sizes_type) with self.assertRaisesRegexp(TypeError, err_msg): gp.pool(data, pool_map, sizes) @parameterized.parameters( ('data must have a rank greater than 1', (3,), (3,), None), ('pool_map must have a rank of 2', (3, 3), (3,), None), ('sizes must have a rank of 3', (4, 5, 3, 2), (4, 5, 3, 3), (3, 2)), ) def test_pool_exception_raised_shapes(self, err_msg, data_shape, pool_map_shape, sizes_shape): """Tests the correct exceptions are raised for invalid shapes.""" data = np.ones(data_shape, dtype=np.float32) pool_map = _dense_to_sparse(np.ones(pool_map_shape, dtype=np.float32)) if sizes_shape is not None: sizes = np.ones(sizes_shape, dtype=np.int32) else: sizes = None with self.assertRaisesRegexp(ValueError, err_msg): gp.pool(data, pool_map, sizes) def test_pool_exception_raised_algorithm(self): """Tests the correct exception is raised for an invalid algorithm.""" data = np.ones(shape=(2, 2)) pool_map = _dense_to_sparse(np.ones(shape=(2, 2))) with self.assertRaisesRegexp( ValueError, 'The pooling method must be "weighted" or "max"'): gp.pool(data, pool_map, sizes=None, algorithm='mean') @parameterized.parameters( ((2, 3), 4, 3, np.float32), ((1,), 6, 1, np.float32), ((4, 1, 3), 9, 7, np.float64), ((2, 8, 4, 6), 19, 11, np.float64), ) def test_pool_identity(self, batch_shape, num_vertices, num_features, data_type): """Tests graph pooling with identity maps.""" data_shape = 
np.concatenate((batch_shape, (num_vertices, num_features))) data = np.random.uniform(size=data_shape).astype(data_type) pool_map = _batch_sparse_eye(batch_shape, num_vertices, data_type) pooled_max = gp.pool(data, pool_map, sizes=None, algorithm='max', name=None) pooled_weighted = gp.pool( data, pool_map, sizes=None, algorithm='weighted', name=None) self.assertAllClose(pooled_max, data) self.assertAllClose(pooled_weighted, data) def test_pool_preset_padded(self): """Tests pooling with preset data and padding.""" data = np.reshape(np.arange(12).astype(np.float32), (2, 3, 2)) sizes = ((2, 3), (3, 3)) pool_map = _dense_to_sparse( np.array((((0.5, 0.5, 0.), (0., 0., 1.), (0., 0., 0.)), ((1., 0., 0.), (0., 1., 0.), (0., 0., 1.))), dtype=np.float32)) pooled_max = gp.pool(data, pool_map, sizes, algorithm='max') pooled_weighted = gp.pool(data, pool_map, sizes, algorithm='weighted') true_max = (((2., 3.), (4., 5.), (0., 0.)), ((6., 7.), (8., 9.), (10., 11.))) true_weighted = (((1., 2.), (4., 5.), (0., 0.)), ((6., 7.), (8., 9.), (10., 11.))) self.assertAllClose(pooled_max, true_max) self.assertAllClose(pooled_weighted, true_weighted) def test_pool_preset(self): """Tests pooling with preset data.""" pool_map = np.array(((0.5, 0.5, 0., 0.), (0., 0., 0.5, 0.5)), dtype=np.float32) pool_map = _dense_to_sparse(pool_map) data = np.reshape(np.arange(8).astype(np.float32), (4, 2)) max_true = data[(1, 3), :] max_weighted = (data[(0, 2), :] + max_true) * 0.5 pooled_max = gp.pool(data, pool_map, sizes=None, algorithm='max', name=None) pooled_weighted = gp.pool( data, pool_map, sizes=None, algorithm='weighted', name=None) self.assertAllClose(pooled_max, max_true) self.assertAllClose(pooled_weighted, max_weighted) @parameterized.parameters((20, 10, 3), (2, 1, 1), (2, 5, 4), (2, 1, 3)) def test_pool_random(self, num_input_vertices, num_output_vertices, num_features): """Tests pooling with random inputs.""" pool_map = 0.001 + np.random.uniform( size=(num_output_vertices, num_input_vertices)) data = np.random.uniform(size=(num_input_vertices, num_features)) true_weighted = np.matmul(pool_map, data) true_max = np.tile( np.max(data, axis=0, keepdims=True), (num_output_vertices, 1)) pool_map = _dense_to_sparse(pool_map) with self.subTest(name='max'): pooled_max = gp.pool(data, pool_map, None, algorithm='max') self.assertAllClose(pooled_max, true_max) with self.subTest(name='weighted'): pooled_weighted = gp.pool(data, pool_map, None, algorithm='weighted') self.assertAllClose(pooled_weighted, true_weighted) def test_pool_jacobian(self): """Tests the jacobian is correct.""" sizes = ((2, 4), (3, 5)) data_init = np.random.uniform(size=(2, 5, 3)) pool_map = np.random.uniform(size=(2, 3, 5)) data_init[0, -1, :] = 0. pool_map[0, -1, :] = 0. 
pool_map = _dense_to_sparse(pool_map) def gp_pool(data, algorithm): return gp.pool(data, pool_map, sizes, algorithm=algorithm) with self.subTest(name='max'): self.assert_jacobian_is_correct_fn(lambda data: gp_pool(data, 'max'), [data_init]) with self.subTest(name='weighted'): self.assert_jacobian_is_correct_fn(lambda data: gp_pool(data, 'weighted'), [data_init]) class GraphPoolingTestUnpoolTests(test_case.TestCase): @parameterized.parameters( ("'sizes' must have an integer type.", np.float32, np.float32, np.float32), ("'data' must have a float type.", np.int32, np.float32, np.int32), ("'pool_map' and 'data' must have the same type.", np.float32, np.float64, np.int32)) def test_unpool_exception_raised_types(self, err_msg, data_type, pool_map_type, sizes_type): """Tests the correct exceptions are raised for invalid types.""" data = np.ones((2, 3, 3), dtype=data_type) pool_map = _dense_to_sparse(np.ones((2, 3, 3), dtype=pool_map_type)) sizes = np.array(((1, 2), (2, 3)), dtype=sizes_type) with self.assertRaisesRegexp(TypeError, err_msg): gp.unpool(data, pool_map, sizes) @parameterized.parameters( ('data must have a rank greater than 1', (3,), (3,), None), ('pool_map must have a rank of 2', (3, 3), (3,), None), ('sizes must have a rank of 3', (4, 5, 3, 2), (4, 5, 3, 3), (3, 2)), ('data must have a rank less than 6', (2, 3, 4, 5, 3, 2), (2, 3, 4, 5, 3, 3), None), ) def test_unpool_exception_raised_shapes(self, err_msg, data_shape, pool_map_shape, sizes_shape): """Tests the correct exceptions are raised for invalid shapes.""" data = np.ones(data_shape, dtype=np.float32) pool_map = _dense_to_sparse(np.ones(pool_map_shape, dtype=np.float32)) if sizes_shape is not None: sizes = np.ones(sizes_shape, dtype=np.int32) else: sizes = None with self.assertRaisesRegexp(ValueError, err_msg): gp.unpool(data, pool_map, sizes) @parameterized.parameters( ((2, 3), 4, 3, np.float32), ((1,), 6, 1, np.float32), ((4, 1, 3), 9, 7, np.float64), ((2, 8, 4), 19, 11, np.float64), ) def test_unpool_identity(self, batch_shape, num_vertices, num_features, data_type): """Tests graph unpooling with identity maps.""" data_shape = np.concatenate((batch_shape, (num_vertices, num_features))) data = np.random.uniform(size=data_shape).astype(data_type) pool_map = _batch_sparse_eye(batch_shape, num_vertices, data_type) unpooled = gp.unpool(data, pool_map, sizes=None) self.assertAllClose(unpooled, data) def test_unpool_preset_padded(self): """Tests pooling with preset data and padding.""" data = np.reshape(np.arange(12).astype(np.float32), (2, 3, 2)) data[0, -1, :] = 0. 
sizes = ((2, 3), (3, 3)) pool_map = _dense_to_sparse( np.array((((0.5, 0.5, 0.), (0., 0., 1.), (0., 0., 0.)), ((1., 0., 0.), (0., 1., 0.), (0., 0., 1.))), dtype=np.float32)) unpooled = gp.unpool(data, pool_map, sizes) true = (((0., 1.), (0., 1.), (2., 3.)), ((6., 7.), (8., 9.), (10., 11.))) self.assertAllClose(unpooled, true) @parameterized.parameters((20, 4), (2, 1), (12, 4), (6, 3)) def test_unpool_random(self, num_vertices, num_features): """Tests pooling with random data inputs.""" output_vertices = num_vertices // 2 pool_map = np.zeros(shape=(output_vertices, num_vertices), dtype=np.float32) for i in range(output_vertices): pool_map[i, (i * 2, i * 2 + 1)] = (0.5, 0.5) data = np.random.uniform(size=(output_vertices, num_features)).astype(np.float32) unpooled = gp.unpool( data, _dense_to_sparse(pool_map), sizes=None, name=None) with self.subTest(name='direct_unpool'): true = np.zeros(shape=(num_vertices, num_features)).astype(np.float32) true[0::2, :] = data true[1::2, :] = data self.assertAllClose(unpooled, true) with self.subTest(name='permute_pool_map'): permutation = np.random.permutation(num_vertices) pool_map_permute = pool_map[:, permutation] unpooled_permute = gp.unpool(data, _dense_to_sparse(pool_map_permute), None) true_permute = true[permutation, :] self.assertAllClose(unpooled_permute, true_permute) def test_unpool_jacobian_random(self): """Tests the jacobian is correct.""" sizes = ((2, 4), (3, 5)) data_init = np.random.uniform(size=(2, 3, 6)) pool_map = np.random.uniform(size=(2, 3, 5)) data_init[0, -1, :] = 0. pool_map[0, -1, :] = 0. pool_map = _dense_to_sparse(pool_map) def gp_unpool(data): return gp.unpool(data, pool_map, sizes) self.assert_jacobian_is_correct_fn(gp_unpool, [data_init]) class GraphPoolingUpsampleTransposeConvolutionTests(test_case.TestCase): @parameterized.parameters( ("'sizes' must have an integer type.", np.float32, np.float32, np.float32), ("'data' must have a float type.", np.int32, np.float32, np.int32), ("'pool_map' and 'data' must have the same type.", np.float32, np.float64, np.int32)) def test_upsample_transposed_convolution_exception_raised_types( self, err_msg, data_type, pool_map_type, sizes_type): """Tests the correct exceptions are raised for invalid types.""" data = np.ones((2, 3, 3), dtype=data_type) pool_map = _dense_to_sparse(np.ones((2, 3, 3), dtype=pool_map_type)) sizes = np.array(((1, 2), (2, 3)), dtype=sizes_type) with self.assertRaisesRegexp(TypeError, err_msg): gp.upsample_transposed_convolution( data, pool_map, sizes, kernel_size=1, transposed_convolution_op=None) @parameterized.parameters( ('data must have a rank greater than 1', (3,), (3,), None), ('pool_map must have a rank of 2', (3, 3), (3,), None), ('sizes must have a rank of 3', (4, 5, 3, 2), (4, 5, 3, 3), (3, 2)), ('data must have a rank less than 6', (2, 3, 4, 5, 3, 2), (2, 3, 4, 5, 3, 3), None), ) def test_upsample_transposed_convolution_exception_raised_shapes( self, err_msg, data_shape, pool_map_shape, sizes_shape): """Tests the correct exceptions are raised for invalid shapes.""" data = np.ones(data_shape, dtype=np.float32) pool_map = _dense_to_sparse(np.ones(pool_map_shape, dtype=np.float32)) if sizes_shape is not None: sizes = np.ones(sizes_shape, dtype=np.int32) else: sizes = None with self.assertRaisesRegexp(ValueError, err_msg): gp.upsample_transposed_convolution( data, pool_map, sizes, kernel_size=1, transposed_convolution_op=None) def test_upsample_transposed_convolution_exception_raised_callable(self): """Tests the correct exception is raised for an invalid 
convolution op.""" data = np.ones((5, 3)) pool_map = _dense_to_sparse(np.eye(5)) err_msg = "'transposed_convolution_op' must be callable." with self.assertRaisesRegexp(TypeError, err_msg): gp.upsample_transposed_convolution( data, pool_map, sizes=None, kernel_size=1, transposed_convolution_op=1) @parameterized.parameters((1, 1, 1, np.float32), (5, 3, 1, np.float32), (3, 6, 15, np.float64)) def test_upsample_transposed_convolution_zero_kernel(self, num_vertices, num_features, kernel_size, data_type): """Tests the upsampling with a zero kernel.""" data = np.random.uniform(size=(num_vertices, num_features)).astype(data_type) pool_map = np.zeros( shape=(num_vertices, num_vertices * kernel_size), dtype=data_type) for i in range(num_vertices): pool_map[i, np.arange(kernel_size * i, kernel_size * (i + 1))] = (1.0 / kernel_size) pool_map = _dense_to_sparse(pool_map) # Transposed convolution op with a zero kernel. transposed_convolution_op = tf.keras.layers.Conv2DTranspose( filters=num_features, kernel_size=(1, kernel_size), strides=(1, kernel_size), padding='valid', use_bias=False, kernel_initializer=tf.compat.v1.keras.initializers.zeros()) upsampled = gp.upsample_transposed_convolution( data, pool_map, sizes=None, kernel_size=kernel_size, transposed_convolution_op=transposed_convolution_op) # Initializes variables of the transpose conv layer. self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllEqual( tf.shape(input=upsampled), (num_vertices * kernel_size, num_features)) self.assertAllEqual(upsampled, tf.zeros_like(upsampled)) @parameterized.parameters( itertools.product((3,), (6,), (3,), range(3), range(6), range(6)),) def test_upsample_transposed_convolution_selector_kernel_random( self, num_vertices, num_features, kernel_size, kernel_index, feature1_index, feature2_index): """Tests the upsampling with an indicator kernel.""" data = np.random.uniform(size=(num_vertices, num_features)).astype(np.float32) pool_map = np.zeros( shape=(num_vertices, num_vertices * kernel_size), dtype=np.float32) for i in range(num_vertices): pool_map[i, np.arange(kernel_size * i, kernel_size * (i + 1))] = (1.0 / kernel_size) pool_map = _dense_to_sparse(pool_map) selection = np.zeros( shape=(1, kernel_size, num_features, num_features), dtype=np.float32) selection[0, kernel_index, feature1_index, feature2_index] = 1. initializer = tf.compat.v1.constant_initializer(value=selection) transposed_convolution_op = tf.keras.layers.Conv2DTranspose( filters=num_features, kernel_size=(1, kernel_size), strides=(1, kernel_size), padding='valid', use_bias=False, kernel_initializer=initializer) true = np.zeros( shape=(num_vertices * kernel_size, num_features), dtype=np.float32) input_column = feature2_index output_column = feature1_index output_row_start = kernel_index true[output_row_start::kernel_size, output_column] = (data[:, input_column]) upsampled = gp.upsample_transposed_convolution( data, pool_map, sizes=None, kernel_size=kernel_size, transposed_convolution_op=transposed_convolution_op) # Initializes variables of the transpose conv layer. self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllEqual(upsampled, true) def test_upsample_transposed_convolution_preset_padded(self): """Tests upsampling with presets.""" data = np.reshape(np.arange(12).astype(np.float32), (2, 3, 2)) data[0, -1, :] = 0. 
sizes = ((2, 3), (3, 3)) pool_map = _dense_to_sparse( np.array((((0.5, 0.5, 0.), (0., 0., 1.), (0., 0., 0.)), ((1., 0., 0.), (0., 1., 0.), (0., 0., 1.))), dtype=np.float32)) kernel = np.ones(shape=(1, 2, 2, 2), dtype=np.float32) initializer = tf.compat.v1.constant_initializer(value=kernel) transposed_convolution_op = tf.keras.layers.Conv2DTranspose( filters=2, kernel_size=(1, 2), strides=(1, 2), padding='valid', use_bias=False, kernel_initializer=initializer) # Convolving with an all-ones kernel is equal to summation of the input. data_sum = np.tile(np.sum(data, axis=-1, keepdims=True), (1, 1, 2)) true = np.zeros(shape=(2, 3, 2), dtype=np.float32) true[0, :, :] = data_sum[0, (0, 0, 1), :] true[1, :, :] = data_sum[1, :, :] upsampled = gp.upsample_transposed_convolution( data, pool_map, sizes=sizes, kernel_size=2, transposed_convolution_op=transposed_convolution_op) # Initializes variables of the transpose conv layer. self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllEqual(upsampled.shape, (2, 3, 2)) self.assertAllClose(upsampled, true) def test_upsample_transposed_convolution_jacobian_random(self): """Tests the jacobian is correct.""" num_filters = 6 kernel_size = 1 data_init = np.random.uniform(size=(2, 5, num_filters)) pool_map = _batch_sparse_eye((2,), 5, np.float64) transposed_convolution_op = tf.keras.layers.Conv2DTranspose( filters=num_filters, kernel_size=(1, kernel_size), strides=(1, kernel_size), padding='valid', dtype='float64') # Calling the upsample_transposed_convolution to create the variables # in the transposed_convoution. gp.upsample_transposed_convolution( data_init, pool_map, sizes=None, kernel_size=kernel_size, transposed_convolution_op=transposed_convolution_op) # Initializes variables of the transpose conv layer. self.evaluate(tf.compat.v1.global_variables_initializer()) def gp_upsample_transposed_convolution(data): return gp.upsample_transposed_convolution( data, pool_map, sizes=None, kernel_size=kernel_size, transposed_convolution_op=transposed_convolution_op) self.assert_jacobian_is_correct_fn(gp_upsample_transposed_convolution, [data_init]) def test_upsample_transposed_convolution_jacobian_random_padding(self): """Tests the jacobian is correct with padded data.""" num_filters = 6 sizes = ((2, 4), (3, 5)) data_init = np.random.uniform(size=(2, 3, num_filters)) data_init[0, -1, :] = 0. pool_map = np.array( (((0.5, 0.5, 0., 0., 0.), (0., 0., 0.5, 0.5, 0.), (0., 0., 0., 0., 0.)), ((1., 0., 0., 0., 0.), (0., 1. / 3., 1. / 3., 1. / 3., 0.), (0., 0., 0., 0., 1.))), dtype=data_init.dtype) pool_map = _dense_to_sparse(pool_map) kernel_size = 2 transposed_convolution_op = tf.keras.layers.Conv2DTranspose( filters=num_filters, kernel_size=(1, kernel_size), strides=(1, kernel_size), padding='valid', dtype='float64') # Calling the upsample_transposed_convolution to create the variables # in the transposed_convoution. gp.upsample_transposed_convolution( data_init, pool_map, sizes=sizes, kernel_size=kernel_size, transposed_convolution_op=transposed_convolution_op) # Initializes variables of the transpose conv layer. self.evaluate(tf.compat.v1.global_variables_initializer()) def gp_upsample_transposed_convolution(data): return gp.upsample_transposed_convolution( data, pool_map, sizes=sizes, kernel_size=kernel_size, transposed_convolution_op=transposed_convolution_op) self.assert_jacobian_is_correct_fn(gp_upsample_transposed_convolution, [data_init]) if __name__ == '__main__': test_case.main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tensorflow_graphics.geometry.convolution.tests.graph_pooling.""" # pylint: disable=protected-access import itertools from absl.testing import parameterized import numpy as np import tensorflow as tf import tensorflow_graphics.geometry.convolution.graph_pooling as gp from tensorflow_graphics.geometry.convolution.tests import utils_test from tensorflow_graphics.util import test_case def _dense_to_sparse(data): """Convert a numpy array to a tf.SparseTensor.""" return utils_test._dense_to_sparse(data) def _batch_sparse_eye(batch_shape, num_vertices, dtype): """Generate a batch of identity matrices.""" eye = np.eye(num_vertices, dtype=dtype) num_batch_dims = len(batch_shape) expand_shape = np.concatenate((np.ones( num_batch_dims, dtype=np.int32), (num_vertices, num_vertices)), axis=0) eye = np.reshape(eye, expand_shape) tile_shape = np.concatenate((batch_shape, (1, 1)), axis=0) return _dense_to_sparse(np.tile(eye, tile_shape)) class GraphPoolingTestPoolTests(test_case.TestCase): @parameterized.parameters( ("'sizes' must have an integer type.", np.float32, np.float32, np.float32), ("'data' must have a float type.", np.int32, np.float32, np.int32), ("'pool_map' and 'data' must have the same type.", np.float32, np.float64, np.int32)) def test_pool_exception_raised_types(self, err_msg, data_type, pool_map_type, sizes_type): """Tests the correct exceptions are raised for invalid types.""" data = np.ones((2, 3, 3), dtype=data_type) pool_map = _dense_to_sparse(np.ones((2, 3, 3), dtype=pool_map_type)) sizes = np.array(((1, 2), (2, 3)), dtype=sizes_type) with self.assertRaisesRegexp(TypeError, err_msg): gp.pool(data, pool_map, sizes) @parameterized.parameters( ('data must have a rank greater than 1', (3,), (3,), None), ('pool_map must have a rank of 2', (3, 3), (3,), None), ('sizes must have a rank of 3', (4, 5, 3, 2), (4, 5, 3, 3), (3, 2)), ) def test_pool_exception_raised_shapes(self, err_msg, data_shape, pool_map_shape, sizes_shape): """Tests the correct exceptions are raised for invalid shapes.""" data = np.ones(data_shape, dtype=np.float32) pool_map = _dense_to_sparse(np.ones(pool_map_shape, dtype=np.float32)) if sizes_shape is not None: sizes = np.ones(sizes_shape, dtype=np.int32) else: sizes = None with self.assertRaisesRegexp(ValueError, err_msg): gp.pool(data, pool_map, sizes) def test_pool_exception_raised_algorithm(self): """Tests the correct exception is raised for an invalid algorithm.""" data = np.ones(shape=(2, 2)) pool_map = _dense_to_sparse(np.ones(shape=(2, 2))) with self.assertRaisesRegexp( ValueError, 'The pooling method must be "weighted" or "max"'): gp.pool(data, pool_map, sizes=None, algorithm='mean') @parameterized.parameters( ((2, 3), 4, 3, np.float32), ((1,), 6, 1, np.float32), ((4, 1, 3), 9, 7, np.float64), ((2, 8, 4, 6), 19, 11, np.float64), ) def test_pool_identity(self, batch_shape, num_vertices, num_features, data_type): """Tests graph pooling with identity maps.""" data_shape = 
np.concatenate((batch_shape, (num_vertices, num_features))) data = np.random.uniform(size=data_shape).astype(data_type) pool_map = _batch_sparse_eye(batch_shape, num_vertices, data_type) pooled_max = gp.pool(data, pool_map, sizes=None, algorithm='max', name=None) pooled_weighted = gp.pool( data, pool_map, sizes=None, algorithm='weighted', name=None) self.assertAllClose(pooled_max, data) self.assertAllClose(pooled_weighted, data) def test_pool_preset_padded(self): """Tests pooling with preset data and padding.""" data = np.reshape(np.arange(12).astype(np.float32), (2, 3, 2)) sizes = ((2, 3), (3, 3)) pool_map = _dense_to_sparse( np.array((((0.5, 0.5, 0.), (0., 0., 1.), (0., 0., 0.)), ((1., 0., 0.), (0., 1., 0.), (0., 0., 1.))), dtype=np.float32)) pooled_max = gp.pool(data, pool_map, sizes, algorithm='max') pooled_weighted = gp.pool(data, pool_map, sizes, algorithm='weighted') true_max = (((2., 3.), (4., 5.), (0., 0.)), ((6., 7.), (8., 9.), (10., 11.))) true_weighted = (((1., 2.), (4., 5.), (0., 0.)), ((6., 7.), (8., 9.), (10., 11.))) self.assertAllClose(pooled_max, true_max) self.assertAllClose(pooled_weighted, true_weighted) def test_pool_preset(self): """Tests pooling with preset data.""" pool_map = np.array(((0.5, 0.5, 0., 0.), (0., 0., 0.5, 0.5)), dtype=np.float32) pool_map = _dense_to_sparse(pool_map) data = np.reshape(np.arange(8).astype(np.float32), (4, 2)) max_true = data[(1, 3), :] max_weighted = (data[(0, 2), :] + max_true) * 0.5 pooled_max = gp.pool(data, pool_map, sizes=None, algorithm='max', name=None) pooled_weighted = gp.pool( data, pool_map, sizes=None, algorithm='weighted', name=None) self.assertAllClose(pooled_max, max_true) self.assertAllClose(pooled_weighted, max_weighted) @parameterized.parameters((20, 10, 3), (2, 1, 1), (2, 5, 4), (2, 1, 3)) def test_pool_random(self, num_input_vertices, num_output_vertices, num_features): """Tests pooling with random inputs.""" pool_map = 0.001 + np.random.uniform( size=(num_output_vertices, num_input_vertices)) data = np.random.uniform(size=(num_input_vertices, num_features)) true_weighted = np.matmul(pool_map, data) true_max = np.tile( np.max(data, axis=0, keepdims=True), (num_output_vertices, 1)) pool_map = _dense_to_sparse(pool_map) with self.subTest(name='max'): pooled_max = gp.pool(data, pool_map, None, algorithm='max') self.assertAllClose(pooled_max, true_max) with self.subTest(name='weighted'): pooled_weighted = gp.pool(data, pool_map, None, algorithm='weighted') self.assertAllClose(pooled_weighted, true_weighted) def test_pool_jacobian(self): """Tests the jacobian is correct.""" sizes = ((2, 4), (3, 5)) data_init = np.random.uniform(size=(2, 5, 3)) pool_map = np.random.uniform(size=(2, 3, 5)) data_init[0, -1, :] = 0. pool_map[0, -1, :] = 0. 
pool_map = _dense_to_sparse(pool_map) def gp_pool(data, algorithm): return gp.pool(data, pool_map, sizes, algorithm=algorithm) with self.subTest(name='max'): self.assert_jacobian_is_correct_fn(lambda data: gp_pool(data, 'max'), [data_init]) with self.subTest(name='weighted'): self.assert_jacobian_is_correct_fn(lambda data: gp_pool(data, 'weighted'), [data_init]) class GraphPoolingTestUnpoolTests(test_case.TestCase): @parameterized.parameters( ("'sizes' must have an integer type.", np.float32, np.float32, np.float32), ("'data' must have a float type.", np.int32, np.float32, np.int32), ("'pool_map' and 'data' must have the same type.", np.float32, np.float64, np.int32)) def test_unpool_exception_raised_types(self, err_msg, data_type, pool_map_type, sizes_type): """Tests the correct exceptions are raised for invalid types.""" data = np.ones((2, 3, 3), dtype=data_type) pool_map = _dense_to_sparse(np.ones((2, 3, 3), dtype=pool_map_type)) sizes = np.array(((1, 2), (2, 3)), dtype=sizes_type) with self.assertRaisesRegexp(TypeError, err_msg): gp.unpool(data, pool_map, sizes) @parameterized.parameters( ('data must have a rank greater than 1', (3,), (3,), None), ('pool_map must have a rank of 2', (3, 3), (3,), None), ('sizes must have a rank of 3', (4, 5, 3, 2), (4, 5, 3, 3), (3, 2)), ('data must have a rank less than 6', (2, 3, 4, 5, 3, 2), (2, 3, 4, 5, 3, 3), None), ) def test_unpool_exception_raised_shapes(self, err_msg, data_shape, pool_map_shape, sizes_shape): """Tests the correct exceptions are raised for invalid shapes.""" data = np.ones(data_shape, dtype=np.float32) pool_map = _dense_to_sparse(np.ones(pool_map_shape, dtype=np.float32)) if sizes_shape is not None: sizes = np.ones(sizes_shape, dtype=np.int32) else: sizes = None with self.assertRaisesRegexp(ValueError, err_msg): gp.unpool(data, pool_map, sizes) @parameterized.parameters( ((2, 3), 4, 3, np.float32), ((1,), 6, 1, np.float32), ((4, 1, 3), 9, 7, np.float64), ((2, 8, 4), 19, 11, np.float64), ) def test_unpool_identity(self, batch_shape, num_vertices, num_features, data_type): """Tests graph unpooling with identity maps.""" data_shape = np.concatenate((batch_shape, (num_vertices, num_features))) data = np.random.uniform(size=data_shape).astype(data_type) pool_map = _batch_sparse_eye(batch_shape, num_vertices, data_type) unpooled = gp.unpool(data, pool_map, sizes=None) self.assertAllClose(unpooled, data) def test_unpool_preset_padded(self): """Tests pooling with preset data and padding.""" data = np.reshape(np.arange(12).astype(np.float32), (2, 3, 2)) data[0, -1, :] = 0. 
sizes = ((2, 3), (3, 3)) pool_map = _dense_to_sparse( np.array((((0.5, 0.5, 0.), (0., 0., 1.), (0., 0., 0.)), ((1., 0., 0.), (0., 1., 0.), (0., 0., 1.))), dtype=np.float32)) unpooled = gp.unpool(data, pool_map, sizes) true = (((0., 1.), (0., 1.), (2., 3.)), ((6., 7.), (8., 9.), (10., 11.))) self.assertAllClose(unpooled, true) @parameterized.parameters((20, 4), (2, 1), (12, 4), (6, 3)) def test_unpool_random(self, num_vertices, num_features): """Tests unpooling with random inputs.""" output_vertices = num_vertices // 2 pool_map = np.zeros(shape=(output_vertices, num_vertices), dtype=np.float32) for i in range(output_vertices): pool_map[i, (i * 2, i * 2 + 1)] = (0.5, 0.5) data = np.random.uniform(size=(output_vertices, num_features)).astype(np.float32) unpooled = gp.unpool( data, _dense_to_sparse(pool_map), sizes=None, name=None) with self.subTest(name='direct_unpool'): true = np.zeros(shape=(num_vertices, num_features)).astype(np.float32) true[0::2, :] = data true[1::2, :] = data self.assertAllClose(unpooled, true) with self.subTest(name='permute_pool_map'): permutation = np.random.permutation(num_vertices) pool_map_permute = pool_map[:, permutation] unpooled_permute = gp.unpool(data, _dense_to_sparse(pool_map_permute), None) true_permute = true[permutation, :] self.assertAllClose(unpooled_permute, true_permute) def test_unpool_jacobian_random(self): """Tests the jacobian is correct.""" sizes = ((2, 4), (3, 5)) data_init = np.random.uniform(size=(2, 3, 6)) pool_map = np.random.uniform(size=(2, 3, 5)) data_init[0, -1, :] = 0. pool_map[0, -1, :] = 0. pool_map = _dense_to_sparse(pool_map) def gp_unpool(data): return gp.unpool(data, pool_map, sizes) self.assert_jacobian_is_correct_fn(gp_unpool, [data_init]) class GraphPoolingUpsampleTransposeConvolutionTests(test_case.TestCase): @parameterized.parameters( ("'sizes' must have an integer type.", np.float32, np.float32, np.float32), ("'data' must have a float type.", np.int32, np.float32, np.int32), ("'pool_map' and 'data' must have the same type.", np.float32, np.float64, np.int32)) def test_upsample_transposed_convolution_exception_raised_types( self, err_msg, data_type, pool_map_type, sizes_type): """Tests the correct exceptions are raised for invalid types.""" data = np.ones((2, 3, 3), dtype=data_type) pool_map = _dense_to_sparse(np.ones((2, 3, 3), dtype=pool_map_type)) sizes = np.array(((1, 2), (2, 3)), dtype=sizes_type) with self.assertRaisesRegexp(TypeError, err_msg): gp.upsample_transposed_convolution( data, pool_map, sizes, kernel_size=1, transposed_convolution_op=None) @parameterized.parameters( ('data must have a rank greater than 1', (3,), (3,), None), ('pool_map must have a rank of 2', (3, 3), (3,), None), ('sizes must have a rank of 3', (4, 5, 3, 2), (4, 5, 3, 3), (3, 2)), ('data must have a rank less than 6', (2, 3, 4, 5, 3, 2), (2, 3, 4, 5, 3, 3), None), ) def test_upsample_transposed_convolution_exception_raised_shapes( self, err_msg, data_shape, pool_map_shape, sizes_shape): """Tests the correct exceptions are raised for invalid shapes.""" data = np.ones(data_shape, dtype=np.float32) pool_map = _dense_to_sparse(np.ones(pool_map_shape, dtype=np.float32)) if sizes_shape is not None: sizes = np.ones(sizes_shape, dtype=np.int32) else: sizes = None with self.assertRaisesRegexp(ValueError, err_msg): gp.upsample_transposed_convolution( data, pool_map, sizes, kernel_size=1, transposed_convolution_op=None) def test_upsample_transposed_convolution_exception_raised_callable(self): """Tests the correct exception is raised for an invalid
convolution op.""" data = np.ones((5, 3)) pool_map = _dense_to_sparse(np.eye(5)) err_msg = "'transposed_convolution_op' must be callable." with self.assertRaisesRegexp(TypeError, err_msg): gp.upsample_transposed_convolution( data, pool_map, sizes=None, kernel_size=1, transposed_convolution_op=1) @parameterized.parameters((1, 1, 1, np.float32), (5, 3, 1, np.float32), (3, 6, 15, np.float64)) def test_upsample_transposed_convolution_zero_kernel(self, num_vertices, num_features, kernel_size, data_type): """Tests the upsampling with a zero kernel.""" data = np.random.uniform(size=(num_vertices, num_features)).astype(data_type) pool_map = np.zeros( shape=(num_vertices, num_vertices * kernel_size), dtype=data_type) for i in range(num_vertices): pool_map[i, np.arange(kernel_size * i, kernel_size * (i + 1))] = (1.0 / kernel_size) pool_map = _dense_to_sparse(pool_map) # Transposed convolution op with a zero kernel. transposed_convolution_op = tf.keras.layers.Conv2DTranspose( filters=num_features, kernel_size=(1, kernel_size), strides=(1, kernel_size), padding='valid', use_bias=False, kernel_initializer=tf.compat.v1.keras.initializers.zeros()) upsampled = gp.upsample_transposed_convolution( data, pool_map, sizes=None, kernel_size=kernel_size, transposed_convolution_op=transposed_convolution_op) # Initializes variables of the transpose conv layer. self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllEqual( tf.shape(input=upsampled), (num_vertices * kernel_size, num_features)) self.assertAllEqual(upsampled, tf.zeros_like(upsampled)) @parameterized.parameters( itertools.product((3,), (6,), (3,), range(3), range(6), range(6)),) def test_upsample_transposed_convolution_selector_kernel_random( self, num_vertices, num_features, kernel_size, kernel_index, feature1_index, feature2_index): """Tests the upsampling with an indicator kernel.""" data = np.random.uniform(size=(num_vertices, num_features)).astype(np.float32) pool_map = np.zeros( shape=(num_vertices, num_vertices * kernel_size), dtype=np.float32) for i in range(num_vertices): pool_map[i, np.arange(kernel_size * i, kernel_size * (i + 1))] = (1.0 / kernel_size) pool_map = _dense_to_sparse(pool_map) selection = np.zeros( shape=(1, kernel_size, num_features, num_features), dtype=np.float32) selection[0, kernel_index, feature1_index, feature2_index] = 1. initializer = tf.compat.v1.constant_initializer(value=selection) transposed_convolution_op = tf.keras.layers.Conv2DTranspose( filters=num_features, kernel_size=(1, kernel_size), strides=(1, kernel_size), padding='valid', use_bias=False, kernel_initializer=initializer) true = np.zeros( shape=(num_vertices * kernel_size, num_features), dtype=np.float32) input_column = feature2_index output_column = feature1_index output_row_start = kernel_index true[output_row_start::kernel_size, output_column] = (data[:, input_column]) upsampled = gp.upsample_transposed_convolution( data, pool_map, sizes=None, kernel_size=kernel_size, transposed_convolution_op=transposed_convolution_op) # Initializes variables of the transpose conv layer. self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllEqual(upsampled, true) def test_upsample_transposed_convolution_preset_padded(self): """Tests upsampling with presets.""" data = np.reshape(np.arange(12).astype(np.float32), (2, 3, 2)) data[0, -1, :] = 0. 
sizes = ((2, 3), (3, 3)) pool_map = _dense_to_sparse( np.array((((0.5, 0.5, 0.), (0., 0., 1.), (0., 0., 0.)), ((1., 0., 0.), (0., 1., 0.), (0., 0., 1.))), dtype=np.float32)) kernel = np.ones(shape=(1, 2, 2, 2), dtype=np.float32) initializer = tf.compat.v1.constant_initializer(value=kernel) transposed_convolution_op = tf.keras.layers.Conv2DTranspose( filters=2, kernel_size=(1, 2), strides=(1, 2), padding='valid', use_bias=False, kernel_initializer=initializer) # Convolving with an all-ones kernel is equal to summation of the input. data_sum = np.tile(np.sum(data, axis=-1, keepdims=True), (1, 1, 2)) true = np.zeros(shape=(2, 3, 2), dtype=np.float32) true[0, :, :] = data_sum[0, (0, 0, 1), :] true[1, :, :] = data_sum[1, :, :] upsampled = gp.upsample_transposed_convolution( data, pool_map, sizes=sizes, kernel_size=2, transposed_convolution_op=transposed_convolution_op) # Initializes variables of the transpose conv layer. self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllEqual(upsampled.shape, (2, 3, 2)) self.assertAllClose(upsampled, true) def test_upsample_transposed_convolution_jacobian_random(self): """Tests the jacobian is correct.""" num_filters = 6 kernel_size = 1 data_init = np.random.uniform(size=(2, 5, num_filters)) pool_map = _batch_sparse_eye((2,), 5, np.float64) transposed_convolution_op = tf.keras.layers.Conv2DTranspose( filters=num_filters, kernel_size=(1, kernel_size), strides=(1, kernel_size), padding='valid', dtype='float64') # Calling the upsample_transposed_convolution to create the variables # in the transposed_convolution. gp.upsample_transposed_convolution( data_init, pool_map, sizes=None, kernel_size=kernel_size, transposed_convolution_op=transposed_convolution_op) # Initializes variables of the transpose conv layer. self.evaluate(tf.compat.v1.global_variables_initializer()) def gp_upsample_transposed_convolution(data): return gp.upsample_transposed_convolution( data, pool_map, sizes=None, kernel_size=kernel_size, transposed_convolution_op=transposed_convolution_op) self.assert_jacobian_is_correct_fn(gp_upsample_transposed_convolution, [data_init]) def test_upsample_transposed_convolution_jacobian_random_padding(self): """Tests the jacobian is correct with padded data.""" num_filters = 6 sizes = ((2, 4), (3, 5)) data_init = np.random.uniform(size=(2, 3, num_filters)) data_init[0, -1, :] = 0. pool_map = np.array( (((0.5, 0.5, 0., 0., 0.), (0., 0., 0.5, 0.5, 0.), (0., 0., 0., 0., 0.)), ((1., 0., 0., 0., 0.), (0., 1. / 3., 1. / 3., 1. / 3., 0.), (0., 0., 0., 0., 1.))), dtype=data_init.dtype) pool_map = _dense_to_sparse(pool_map) kernel_size = 2 transposed_convolution_op = tf.keras.layers.Conv2DTranspose( filters=num_filters, kernel_size=(1, kernel_size), strides=(1, kernel_size), padding='valid', dtype='float64') # Calling the upsample_transposed_convolution to create the variables # in the transposed_convolution. gp.upsample_transposed_convolution( data_init, pool_map, sizes=sizes, kernel_size=kernel_size, transposed_convolution_op=transposed_convolution_op) # Initializes variables of the transpose conv layer. self.evaluate(tf.compat.v1.global_variables_initializer()) def gp_upsample_transposed_convolution(data): return gp.upsample_transposed_convolution( data, pool_map, sizes=sizes, kernel_size=kernel_size, transposed_convolution_op=transposed_convolution_op) self.assert_jacobian_is_correct_fn(gp_upsample_transposed_convolution, [data_init]) if __name__ == '__main__': test_case.main()
-1
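The pooling tests above treat `pool_map` as a (possibly sparse) matrix from input to output vertices: 'weighted' pooling is the matrix product `pool_map @ data`, while 'max' pooling takes a per-feature maximum over each output vertex's support set. A minimal NumPy sketch of both semantics, with made-up inputs that mirror `test_pool_preset`; this illustrates the convention the tests encode, not the library's `gp.pool` implementation:

import numpy as np

# Two output vertices pooled from four input vertices, as in test_pool_preset.
pool_map = np.array([[0.5, 0.5, 0.0, 0.0],
                     [0.0, 0.0, 0.5, 0.5]], dtype=np.float32)
data = np.arange(8, dtype=np.float32).reshape(4, 2)

# 'weighted' pooling: each output vertex is a weighted sum of its inputs.
pooled_weighted = pool_map @ data

# 'max' pooling: per-feature maximum over the nonzero columns of each row.
pooled_max = np.stack([data[row != 0.0].max(axis=0) for row in pool_map])

print(pooled_weighted)  # [[1. 2.] [5. 6.]]
print(pooled_max)       # [[2. 3.] [6. 7.]]

These outputs match the expectations hard-coded in `test_pool_preset` (`max_true` and `max_weighted`).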
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/datasets/features/voxel_feature_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for tensorflow_graphics.datasets.features.voxel_feature.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np import tensorflow as tf import tensorflow_datasets as tfds from tensorflow_graphics.datasets.features import voxel_feature _TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data') class VoxelGridFeatureTest(tfds.testing.FeatureExpectationsTestCase): """Test Cases for VoxelGrid FeatureConnector.""" def test_voxel(self): """Tests voxel I/O and encoding/decoding to DatasetFeature.""" mat_file_path = os.path.join(_TEST_DATA_DIR, 'cube.mat') expected_voxel = np.zeros((16, 16, 16), dtype=np.float32) expected_voxel[4:12, 4:12, 4:12] = 1. mat_dict = {'path': mat_file_path, 'key': 'voxels'} raising_inputs = {'path': mat_file_path, 'foo': 'voxels'} wrong_key = {'path': mat_file_path, 'key': 'foo'} wrong_path = {'path': '/somewhere/wrong', 'key': 'voxels'} wrong_dim = np.ones((1, 1, 1, 1)) self.assertFeature( feature=voxel_feature.VoxelGrid((16, 16, 16)), shape=(16, 16, 16), dtype=tf.float32, tests=[ # mat file tfds.testing.FeatureExpectationItem( value=mat_dict, expected=expected_voxel, ), # Voxel Grid tfds.testing.FeatureExpectationItem( value=expected_voxel, expected=expected_voxel, ), tfds.testing.FeatureExpectationItem( value=raising_inputs, raise_cls=ValueError, raise_msg='Missing keys in provided dictionary!', ), tfds.testing.FeatureExpectationItem( value=wrong_key, raise_cls=ValueError, raise_msg='Key `foo` not found in .mat file', ), tfds.testing.FeatureExpectationItem( value=wrong_path, raise_cls=FileNotFoundError, raise_msg='File `/somewhere/wrong` does not exist.', ), tfds.testing.FeatureExpectationItem( value=wrong_dim, raise_cls=ValueError, raise_msg='Only 3D Voxel Grids are supported.', ), ], ) if __name__ == '__main__': tfds.testing.test_main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for tensorflow_graphics.datasets.features.voxel_feature.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np import tensorflow as tf import tensorflow_datasets as tfds from tensorflow_graphics.datasets.features import voxel_feature _TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data') class VoxelGridFeatureTest(tfds.testing.FeatureExpectationsTestCase): """Test Cases for VoxelGrid FeatureConnector.""" def test_voxel(self): """Tests voxel I/O and encoding/decoding to DatasetFeature.""" mat_file_path = os.path.join(_TEST_DATA_DIR, 'cube.mat') expected_voxel = np.zeros((16, 16, 16), dtype=np.float32) expected_voxel[4:12, 4:12, 4:12] = 1. mat_dict = {'path': mat_file_path, 'key': 'voxels'} raising_inputs = {'path': mat_file_path, 'foo': 'voxels'} wrong_key = {'path': mat_file_path, 'key': 'foo'} wrong_path = {'path': '/somewhere/wrong', 'key': 'voxels'} wrong_dim = np.ones((1, 1, 1, 1)) self.assertFeature( feature=voxel_feature.VoxelGrid((16, 16, 16)), shape=(16, 16, 16), dtype=tf.float32, tests=[ # mat file tfds.testing.FeatureExpectationItem( value=mat_dict, expected=expected_voxel, ), # Voxel Grid tfds.testing.FeatureExpectationItem( value=expected_voxel, expected=expected_voxel, ), tfds.testing.FeatureExpectationItem( value=raising_inputs, raise_cls=ValueError, raise_msg='Missing keys in provided dictionary!', ), tfds.testing.FeatureExpectationItem( value=wrong_key, raise_cls=ValueError, raise_msg='Key `foo` not found in .mat file', ), tfds.testing.FeatureExpectationItem( value=wrong_path, raise_cls=FileNotFoundError, raise_msg='File `/somewhere/wrong` does not exist.', ), tfds.testing.FeatureExpectationItem( value=wrong_dim, raise_cls=ValueError, raise_msg='Only 3D Voxel Grids are supported.', ), ], ) if __name__ == '__main__': tfds.testing.test_main()
-1
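The `expected_voxel` fixture in the test above is an 8x8x8 solid cube centered in a 16x16x16 occupancy grid. A short NumPy sketch of the same construction with a sanity check on the occupancy count; this is illustrative only, since the feature connector itself accepts either an array or a .mat file path:

import numpy as np

# 16^3 occupancy grid with an 8^3 solid cube in the middle, as in the test.
voxels = np.zeros((16, 16, 16), dtype=np.float32)
voxels[4:12, 4:12, 4:12] = 1.0
assert voxels.sum() == 8 ** 3  # 512 occupied cells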
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/projects/cvxnet/lib/resnet.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ResNet Architecture.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf keras = tf.keras class Resnet18(keras.Model): """ResNet-18 (V1).""" def __init__(self, feature_dims): super(Resnet18, self).__init__() self.conv1 = keras.layers.Conv2D( 64, 7, strides=2, padding='same', use_bias=False) self.bn1 = keras.layers.BatchNormalization() self.relu1 = keras.layers.ReLU() self.maxpool = keras.layers.MaxPooling2D(3, strides=2, padding='same') layers = [2, 2, 2, 2] self.layer1 = ResLayer(BasicBlock, 64, 64, layers[0]) self.layer2 = ResLayer(BasicBlock, 64, 128, layers[1], stride=2) self.layer3 = ResLayer(BasicBlock, 128, 256, layers[2], stride=2) self.layer4 = ResLayer(BasicBlock, 256, 512, layers[3], stride=2) self.fc = keras.layers.Dense(feature_dims, activation=None) def call(self, x, training=False): x = self.conv1(x) x = self.bn1(x, training=training) x = self.relu1(x) x = self.maxpool(x) x = self.layer1(x, training=training) x = self.layer2(x, training=training) x = self.layer3(x, training=training) x = self.layer4(x, training=training) x = tf.reduce_mean(x, axis=(1, 2)) x = self.fc(x) return x class ResLayer(keras.Model): """Residual Layer.""" def __init__(self, block, inplanes, planes, blocks, stride=1): super(ResLayer, self).__init__() if stride != 1 or inplanes != planes: downsample = True else: downsample = False self.conv_layers = [] self.conv_layers.append(block(planes, stride, downsample=downsample)) for unused_i in range(1, blocks): self.conv_layers.append(block(planes)) def call(self, x, training=True): for layer in self.conv_layers: x = layer(x, training=training) return x class BasicBlock(keras.Model): """Building block of resnet.""" def __init__(self, planes, stride=1, downsample=False): super(BasicBlock, self).__init__() self.conv1 = keras.layers.Conv2D( planes, 3, strides=stride, padding='same', use_bias=False) self.bn1 = keras.layers.BatchNormalization() self.conv2 = keras.layers.Conv2D(planes, 3, padding='same', use_bias=False) self.bn2 = keras.layers.BatchNormalization() if downsample: self.downsample = downsample self.dconv1 = keras.layers.Conv2D( planes, 1, strides=stride, padding='same', use_bias=False) self.dbn1 = keras.layers.BatchNormalization() else: self.downsample = downsample def call(self, x, training=True): residual = x if self.downsample: residual = self.dconv1(residual) residual = self.dbn1(residual, training=training) x = self.conv1(x) x = self.bn1(x, training=training) x = tf.nn.relu(x) x = self.conv2(x) x = self.bn2(x, training=training) x += residual x = tf.nn.relu(x) return x
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ResNet Architecture.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf keras = tf.keras class Resnet18(keras.Model): """ResNet-18 (V1).""" def __init__(self, feature_dims): super(Resnet18, self).__init__() self.conv1 = keras.layers.Conv2D( 64, 7, strides=2, padding='same', use_bias=False) self.bn1 = keras.layers.BatchNormalization() self.relu1 = keras.layers.ReLU() self.maxpool = keras.layers.MaxPooling2D(3, strides=2, padding='same') layers = [2, 2, 2, 2] self.layer1 = ResLayer(BasicBlock, 64, 64, layers[0]) self.layer2 = ResLayer(BasicBlock, 64, 128, layers[1], stride=2) self.layer3 = ResLayer(BasicBlock, 128, 256, layers[2], stride=2) self.layer4 = ResLayer(BasicBlock, 256, 512, layers[3], stride=2) self.fc = keras.layers.Dense(feature_dims, activation=None) def call(self, x, training=False): x = self.conv1(x) x = self.bn1(x, training=training) x = self.relu1(x) x = self.maxpool(x) x = self.layer1(x, training=training) x = self.layer2(x, training=training) x = self.layer3(x, training=training) x = self.layer4(x, training=training) x = tf.reduce_mean(x, axis=(1, 2)) x = self.fc(x) return x class ResLayer(keras.Model): """Residual Layer.""" def __init__(self, block, inplanes, planes, blocks, stride=1): super(ResLayer, self).__init__() if stride != 1 or inplanes != planes: downsample = True else: downsample = False self.conv_layers = [] self.conv_layers.append(block(planes, stride, downsample=downsample)) for unused_i in range(1, blocks): self.conv_layers.append(block(planes)) def call(self, x, training=True): for layer in self.conv_layers: x = layer(x, training=training) return x class BasicBlock(keras.Model): """Building block of resnet.""" def __init__(self, planes, stride=1, downsample=False): super(BasicBlock, self).__init__() self.conv1 = keras.layers.Conv2D( planes, 3, strides=stride, padding='same', use_bias=False) self.bn1 = keras.layers.BatchNormalization() self.conv2 = keras.layers.Conv2D(planes, 3, padding='same', use_bias=False) self.bn2 = keras.layers.BatchNormalization() if downsample: self.downsample = downsample self.dconv1 = keras.layers.Conv2D( planes, 1, strides=stride, padding='same', use_bias=False) self.dbn1 = keras.layers.BatchNormalization() else: self.downsample = downsample def call(self, x, training=True): residual = x if self.downsample: residual = self.dconv1(residual) residual = self.dbn1(residual, training=training) x = self.conv1(x) x = self.bn1(x, training=training) x = tf.nn.relu(x) x = self.conv2(x) x = self.bn2(x, training=training) x += residual x = tf.nn.relu(x) return x
-1
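`BasicBlock` above implements the standard ResNet-V1 residual unit: conv-BN-ReLU, conv-BN, add the (possibly downsampled) input, then a final ReLU. A minimal functional sketch of that dataflow for the identity-skip case (`downsample=False`), with illustrative shapes that are not taken from the repo:

import numpy as np
import tensorflow.compat.v1 as tf

keras = tf.keras

# Toy input: batch of 2, 8x8 feature maps, 64 channels (illustrative shapes).
x = tf.constant(np.random.uniform(size=(2, 8, 8, 64)).astype(np.float32))
conv1 = keras.layers.Conv2D(64, 3, padding='same', use_bias=False)
bn1 = keras.layers.BatchNormalization()
conv2 = keras.layers.Conv2D(64, 3, padding='same', use_bias=False)
bn2 = keras.layers.BatchNormalization()

# Same dataflow as BasicBlock.call with downsample=False.
residual = x
y = tf.nn.relu(bn1(conv1(x), training=False))
y = bn2(conv2(y), training=False)
y = tf.nn.relu(y + residual)  # identity skip connection

When `downsample=True`, the skip path is instead passed through the 1x1 convolution `dconv1` and its batch norm so that the residual matches the main path's shape.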
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/math/math_helpers.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module contains math routines that are shared across different modules.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape def cartesian_to_spherical_coordinates(point_cartesian, eps=None, name=None): """Function to transform Cartesian coordinates to spherical coordinates. This function assumes a right handed coordinate system with `z` pointing up. When `x` and `y` are both `0`, the function outputs `0` for `phi`. Note that the function is not smooth when `x = y = 0`. Note: In the following, A1 to An are optional batch dimensions. Args: point_cartesian: A tensor of shape `[A1, ..., An, 3]`. In the last dimension, the data follows the `x`, `y`, `z` order. eps: A small `float`, to be added to the denominator. If left as `None`, its value is automatically selected using `point_cartesian.dtype`. name: A name for this op. Defaults to `cartesian_to_spherical_coordinates`. Returns: A tensor of shape `[A1, ..., An, 3]`. The last dimension contains (`r`,`theta`,`phi`), where `r` is the sphere radius, `theta` is the polar angle and `phi` is the azimuthal angle. Returns `NaN` gradient if x = y = 0. """ with tf.compat.v1.name_scope(name, "cartesian_to_spherical_coordinates", [point_cartesian]): point_cartesian = tf.convert_to_tensor(value=point_cartesian) shape.check_static( tensor=point_cartesian, tensor_name="point_cartesian", has_dim_equals=(-1, 3)) x, y, z = tf.unstack(point_cartesian, axis=-1) radius = tf.norm(tensor=point_cartesian, axis=-1) theta = tf.acos( tf.clip_by_value(safe_ops.safe_unsigned_div(z, radius, eps), -1., 1.)) phi = tf.atan2(y, x) return tf.stack((radius, theta, phi), axis=-1) def _double_factorial_loop_body(n, result, two): result = tf.compat.v1.where(tf.greater_equal(n, two), result * n, result) return n - two, result, two def _double_factorial_loop_condition(n, result, two): del result # Unused return tf.cast(tf.math.count_nonzero(tf.greater_equal(n, two)), tf.bool) def double_factorial(n): """Computes the double factorial of `n`. Note: In the following, A1 to An are optional batch dimensions. Args: n: A tensor of shape `[A1, ..., An]` containing positive integer values. Returns: A tensor of shape `[A1, ..., An]` containing the double factorial of `n`. """ n = tf.convert_to_tensor(value=n) two = tf.ones_like(n) * 2 result = tf.ones_like(n) _, result, _ = tf.while_loop( cond=_double_factorial_loop_condition, body=_double_factorial_loop_body, loop_vars=[n, result, two]) return result def factorial(n): """Computes the factorial of `n`. Note: In the following, A1 to An are optional batch dimensions. Args: n: A tensor of shape `[A1, ..., An]`. Returns: A tensor of shape `[A1, ..., An]`. 
""" n = tf.convert_to_tensor(value=n) return tf.exp(tf.math.lgamma(n + 1)) def spherical_to_cartesian_coordinates(point_spherical, name=None): """Function to transform Cartesian coordinates to spherical coordinates. Note: In the following, A1 to An are optional batch dimensions. Args: point_spherical: A tensor of shape `[A1, ..., An, 3]`. The last dimension contains r, theta, and phi that respectively correspond to the radius, polar angle and azimuthal angle; r must be non-negative. name: A name for this op. Defaults to 'spherical_to_cartesian_coordinates'. Raises: tf.errors.InvalidArgumentError: If r, theta or phi contains out of range data. Returns: A tensor of shape `[A1, ..., An, 3]`, where the last dimension contains the cartesian coordinates in x,y,z order. """ with tf.compat.v1.name_scope(name, "spherical_to_cartesian_coordinates", [point_spherical]): point_spherical = tf.convert_to_tensor(value=point_spherical) shape.check_static( tensor=point_spherical, tensor_name="point_spherical", has_dim_equals=(-1, 3)) r, theta, phi = tf.unstack(point_spherical, axis=-1) r = asserts.assert_all_above(r, 0) tmp = r * tf.sin(theta) x = tmp * tf.cos(phi) y = tmp * tf.sin(phi) z = r * tf.cos(theta) return tf.stack((x, y, z), axis=-1) def square_to_spherical_coordinates(point_2d, name=None): """Maps points from a unit square to a unit sphere. Note: In the following, A1 to An are optional batch dimensions. Args: point_2d: A tensor of shape `[A1, ..., An, 2]` with values in [0,1]. name: A name for this op. Defaults to "math_square_to_spherical_coordinates". Returns: A tensor of shape `[A1, ..., An, 2]` with [..., 0] having values in [0.0, pi] and [..., 1] with values in [0.0, 2pi]. Raises: ValueError: if the shape of `point_2d` is not supported. InvalidArgumentError: if at least an element of `point_2d` is outside of [0,1]. """ with tf.compat.v1.name_scope(name, "math_square_to_spherical_coordinates", [point_2d]): point_2d = tf.convert_to_tensor(value=point_2d) shape.check_static( tensor=point_2d, tensor_name="point_2d", has_dim_equals=(-1, 2)) point_2d = asserts.assert_all_in_range( point_2d, 0.0, 1.0, open_bounds=False) x, y = tf.unstack(point_2d, axis=-1) theta = 2.0 * tf.acos(tf.sqrt(1.0 - x)) phi = 2.0 * np.pi * y return tf.stack((tf.ones_like(theta), theta, phi), axis=-1) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module contains math routines that are shared across different modules.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape def cartesian_to_spherical_coordinates(point_cartesian, eps=None, name=None): """Function to transform Cartesian coordinates to spherical coordinates. This function assumes a right handed coordinate system with `z` pointing up. When `x` and `y` are both `0`, the function outputs `0` for `phi`. Note that the function is not smooth when `x = y = 0`. Note: In the following, A1 to An are optional batch dimensions. Args: point_cartesian: A tensor of shape `[A1, ..., An, 3]`. In the last dimension, the data follows the `x`, `y`, `z` order. eps: A small `float`, to be added to the denominator. If left as `None`, its value is automatically selected using `point_cartesian.dtype`. name: A name for this op. Defaults to `cartesian_to_spherical_coordinates`. Returns: A tensor of shape `[A1, ..., An, 3]`. The last dimension contains (`r`,`theta`,`phi`), where `r` is the sphere radius, `theta` is the polar angle and `phi` is the azimuthal angle. Returns `NaN` gradient if x = y = 0. """ with tf.compat.v1.name_scope(name, "cartesian_to_spherical_coordinates", [point_cartesian]): point_cartesian = tf.convert_to_tensor(value=point_cartesian) shape.check_static( tensor=point_cartesian, tensor_name="point_cartesian", has_dim_equals=(-1, 3)) x, y, z = tf.unstack(point_cartesian, axis=-1) radius = tf.norm(tensor=point_cartesian, axis=-1) theta = tf.acos( tf.clip_by_value(safe_ops.safe_unsigned_div(z, radius, eps), -1., 1.)) phi = tf.atan2(y, x) return tf.stack((radius, theta, phi), axis=-1) def _double_factorial_loop_body(n, result, two): result = tf.compat.v1.where(tf.greater_equal(n, two), result * n, result) return n - two, result, two def _double_factorial_loop_condition(n, result, two): del result # Unused return tf.cast(tf.math.count_nonzero(tf.greater_equal(n, two)), tf.bool) def double_factorial(n): """Computes the double factorial of `n`. Note: In the following, A1 to An are optional batch dimensions. Args: n: A tensor of shape `[A1, ..., An]` containing positive integer values. Returns: A tensor of shape `[A1, ..., An]` containing the double factorial of `n`. """ n = tf.convert_to_tensor(value=n) two = tf.ones_like(n) * 2 result = tf.ones_like(n) _, result, _ = tf.while_loop( cond=_double_factorial_loop_condition, body=_double_factorial_loop_body, loop_vars=[n, result, two]) return result def factorial(n): """Computes the factorial of `n`. Note: In the following, A1 to An are optional batch dimensions. Args: n: A tensor of shape `[A1, ..., An]`. Returns: A tensor of shape `[A1, ..., An]`. 
""" n = tf.convert_to_tensor(value=n) return tf.exp(tf.math.lgamma(n + 1)) def spherical_to_cartesian_coordinates(point_spherical, name=None): """Function to transform Cartesian coordinates to spherical coordinates. Note: In the following, A1 to An are optional batch dimensions. Args: point_spherical: A tensor of shape `[A1, ..., An, 3]`. The last dimension contains r, theta, and phi that respectively correspond to the radius, polar angle and azimuthal angle; r must be non-negative. name: A name for this op. Defaults to 'spherical_to_cartesian_coordinates'. Raises: tf.errors.InvalidArgumentError: If r, theta or phi contains out of range data. Returns: A tensor of shape `[A1, ..., An, 3]`, where the last dimension contains the cartesian coordinates in x,y,z order. """ with tf.compat.v1.name_scope(name, "spherical_to_cartesian_coordinates", [point_spherical]): point_spherical = tf.convert_to_tensor(value=point_spherical) shape.check_static( tensor=point_spherical, tensor_name="point_spherical", has_dim_equals=(-1, 3)) r, theta, phi = tf.unstack(point_spherical, axis=-1) r = asserts.assert_all_above(r, 0) tmp = r * tf.sin(theta) x = tmp * tf.cos(phi) y = tmp * tf.sin(phi) z = r * tf.cos(theta) return tf.stack((x, y, z), axis=-1) def square_to_spherical_coordinates(point_2d, name=None): """Maps points from a unit square to a unit sphere. Note: In the following, A1 to An are optional batch dimensions. Args: point_2d: A tensor of shape `[A1, ..., An, 2]` with values in [0,1]. name: A name for this op. Defaults to "math_square_to_spherical_coordinates". Returns: A tensor of shape `[A1, ..., An, 2]` with [..., 0] having values in [0.0, pi] and [..., 1] with values in [0.0, 2pi]. Raises: ValueError: if the shape of `point_2d` is not supported. InvalidArgumentError: if at least an element of `point_2d` is outside of [0,1]. """ with tf.compat.v1.name_scope(name, "math_square_to_spherical_coordinates", [point_2d]): point_2d = tf.convert_to_tensor(value=point_2d) shape.check_static( tensor=point_2d, tensor_name="point_2d", has_dim_equals=(-1, 2)) point_2d = asserts.assert_all_in_range( point_2d, 0.0, 1.0, open_bounds=False) x, y = tf.unstack(point_2d, axis=-1) theta = 2.0 * tf.acos(tf.sqrt(1.0 - x)) phi = 2.0 * np.pi * y return tf.stack((tf.ones_like(theta), theta, phi), axis=-1) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
-1
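`cartesian_to_spherical_coordinates` and `spherical_to_cartesian_coordinates` are mutual inverses away from the poles, under the convention r >= 0, theta in [0, pi] measured from +z, and phi = atan2(y, x). A small NumPy round-trip check of that convention (plain NumPy rather than the TensorFlow ops above):

import numpy as np

def spherical_to_cartesian(r, theta, phi):
    # theta is the polar angle from +z, phi the azimuth in the x-y plane.
    return np.array([r * np.sin(theta) * np.cos(phi),
                     r * np.sin(theta) * np.sin(phi),
                     r * np.cos(theta)])

def cartesian_to_spherical(p):
    r = np.linalg.norm(p)
    return r, np.arccos(p[2] / r), np.arctan2(p[1], p[0])

p = spherical_to_cartesian(2.0, 0.3, 1.1)
assert np.allclose(spherical_to_cartesian(*cartesian_to_spherical(p)), p)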
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/geometry/transformation/axis_angle.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""This module implements axis-angle functionalities. The axis-angle representation is defined as $$\theta\mathbf{a}$$, where $$\mathbf{a}$$ is a unit vector indicating the direction of rotation and $$\theta$$ is a scalar controlling the angle of rotation. It is important to note that the axis-angle does not perform rotation by itself, but that it can be used to rotate any given vector $$\mathbf{v} \in {\mathbb{R}^3}$$ into a vector $$\mathbf{v}'$$ using the Rodrigues' rotation formula: $$\mathbf{v}'=\mathbf{v}\cos(\theta)+(\mathbf{a}\times\mathbf{v})\sin(\theta) +\mathbf{a}(\mathbf{a}\cdot\mathbf{v})(1-\cos(\theta)).$$ More details about the axis-angle formalism can be found on [this page.] (https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation) Note: Some of the functions defined in the module expect a normalized axis $$\mathbf{a} = [x, y, z]^T$$ as inputs where $$x^2 + y^2 + z^2 = 1$$. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.geometry.transformation import quaternion as quaternion_lib from tensorflow_graphics.geometry.transformation import rotation_matrix_3d from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape def from_euler(angles, name=None): r"""Converts Euler angles to an axis-angle representation. Note: The conversion is performed by first converting to a quaternion representation, and then by converting the quaternion to an axis-angle. Note: In the following, A1 to An are optional batch dimensions. Args: angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the three Euler angles. `[A1, ..., An, 0]` is the angle about `x` in radians `[A1, ..., An, 1]` is the angle about `y` in radians and `[A1, ..., An, 2]` is the angle about `z` in radians. name: A name for this op that defaults to "axis_angle_from_euler". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. """ with tf.compat.v1.name_scope(name, "axis_angle_from_euler", [angles]): quaternion = quaternion_lib.from_euler(angles) return from_quaternion(quaternion) def from_euler_with_small_angles_approximation(angles, name=None): r"""Converts small Euler angles to an axis-angle representation. Under the small angle assumption, $$\sin(x)$$ and $$\cos(x)$$ can be approximated by their second order Taylor expansions, where $$\sin(x) \approx x$$ and $$\cos(x) \approx 1 - \frac{x^2}{2}$$. In the current implementation, the smallness of the angles is not verified. 
Note: The conversion is performed by first converting to a quaternion representation, and then by converting the quaternion to an axis-angle. Note: In the following, A1 to An are optional batch dimensions. Args: angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the three small Euler angles. `[A1, ..., An, 0]` is the angle about `x` in radians `[A1, ..., An, 1]` is the angle about `y` in radians and `[A1, ..., An, 2]` is the angle about `z` in radians. name: A name for this op that defaults to "axis_angle_from_euler_with_small_angles_approximation". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. """ with tf.compat.v1.name_scope( name, "axis_angle_from_euler_with_small_angles_approximation", [angles]): quaternion = quaternion_lib.from_euler_with_small_angles_approximation( angles) return from_quaternion(quaternion) def from_quaternion(quaternion, name=None): """Converts a quaternion to an axis-angle representation. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. name: A name for this op that defaults to "axis_angle_from_quaternion". Returns: Tuple of two tensors of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. Raises: ValueError: If the shape of `quaternion` is not supported. """ with tf.compat.v1.name_scope(name, "axis_angle_from_quaternion", [quaternion]): quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) quaternion = asserts.assert_normalized(quaternion) # This prevents zero norm xyz and zero w, and is differentiable. quaternion += asserts.select_eps_for_addition(quaternion.dtype) xyz, w = tf.split(quaternion, (3, 1), axis=-1) norm = tf.norm(tensor=xyz, axis=-1, keepdims=True) angle = 2.0 * tf.atan2(norm, tf.abs(w)) axis = safe_ops.safe_unsigned_div(safe_ops.nonzero_sign(w) * xyz, norm) return axis, angle def from_rotation_matrix(rotation_matrix, name=None): """Converts a rotation matrix to an axis-angle representation. Note: In the current version the returned axis-angle representation is not unique for a given rotation matrix. Since a direct conversion would not really be faster, we first transform the rotation matrix to a quaternion, and finally perform the conversion from that quaternion to the corresponding axis-angle representation. Note: In the following, A1 to An are optional batch dimensions. Args: rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the last two dimensions represent a rotation matrix. name: A name for this op that defaults to "axis_angle_from_rotation_matrix". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. Raises: ValueError: If the shape of `rotation_matrix` is not supported. 
""" with tf.compat.v1.name_scope(name, "axis_angle_from_rotation_matrix", [rotation_matrix]): rotation_matrix = tf.convert_to_tensor(value=rotation_matrix) shape.check_static( tensor=rotation_matrix, tensor_name="rotation_matrix", has_rank_greater_than=1, has_dim_equals=((-2, 3), (-1, 3))) rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized( rotation_matrix) quaternion = quaternion_lib.from_rotation_matrix(rotation_matrix) return from_quaternion(quaternion) def inverse(axis, angle, name=None): """Computes the axis-angle that is the inverse of the input axis-angle. Note: In the following, A1 to An are optional batch dimensions. Args: axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension represents an angle. name: A name for this op that defaults to "axis_angle_inverse". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. Raises: ValueError: If the shape of `axis` or `angle` is not supported. """ with tf.compat.v1.name_scope(name, "axis_angle_inverse", [axis, angle]): axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(axis, angle), tensor_names=("axis", "angle"), last_axes=-2, broadcast_compatible=True) axis = asserts.assert_normalized(axis) return axis, -angle def is_normalized(axis, angle, atol=1e-3, name=None): """Determines if the axis-angle is normalized or not. Note: In the following, A1 to An are optional batch dimensions. Args: axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension represents an angle. atol: The absolute tolerance parameter. name: A name for this op that defaults to "axis_angle_is_normalized". Returns: A tensor of shape `[A1, ..., An, 1]`, where False indicates that the axis is not normalized. """ with tf.compat.v1.name_scope(name, "axis_angle_is_normalized", [axis, angle]): axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(axis, angle), tensor_names=("axis", "angle"), last_axes=-2, broadcast_compatible=True) norms = tf.norm(tensor=axis, axis=-1, keepdims=True) return tf.abs(norms - 1.) < atol def rotate(point, axis, angle, name=None): r"""Rotates a 3d point using an axis-angle by applying the Rodrigues' formula. Rotates a vector $$\mathbf{v} \in {\mathbb{R}^3}$$ into a vector $$\mathbf{v}' \in {\mathbb{R}^3}$$ using the Rodrigues' rotation formula: $$\mathbf{v}'=\mathbf{v}\cos(\theta)+(\mathbf{a}\times\mathbf{v})\sin(\theta) +\mathbf{a}(\mathbf{a}\cdot\mathbf{v})(1-\cos(\theta)).$$ Note: In the following, A1 to An are optional batch dimensions. Args: point: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a 3d point to rotate. axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. 
angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension represents an angle. name: A name for this op that defaults to "axis_angle_rotate". Returns: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a 3d point. Raises: ValueError: If `point`, `axis`, or `angle` are of different shape or if their respective shape is not supported. """ with tf.compat.v1.name_scope(name, "axis_angle_rotate", [point, axis, angle]): point = tf.convert_to_tensor(value=point) axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static( tensor=point, tensor_name="point", has_dim_equals=(-1, 3)) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(point, axis, angle), tensor_names=("point", "axis", "angle"), last_axes=-2, broadcast_compatible=True) axis = asserts.assert_normalized(axis) cos_angle = tf.cos(angle) axis_dot_point = vector.dot(axis, point) return point * cos_angle + vector.cross( axis, point) * tf.sin(angle) + axis * axis_dot_point * (1.0 - cos_angle) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""This module implements axis-angle functionalities. The axis-angle representation is defined as $$\theta\mathbf{a}$$, where $$\mathbf{a}$$ is a unit vector indicating the direction of rotation and $$\theta$$ is a scalar controlling the angle of rotation. It is important to note that the axis-angle does not perform rotation by itself, but that it can be used to rotate any given vector $$\mathbf{v} \in {\mathbb{R}^3}$$ into a vector $$\mathbf{v}'$$ using the Rodrigues' rotation formula: $$\mathbf{v}'=\mathbf{v}\cos(\theta)+(\mathbf{a}\times\mathbf{v})\sin(\theta) +\mathbf{a}(\mathbf{a}\cdot\mathbf{v})(1-\cos(\theta)).$$ More details about the axis-angle formalism can be found on [this page.] (https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation) Note: Some of the functions defined in the module expect a normalized axis $$\mathbf{a} = [x, y, z]^T$$ as inputs where $$x^2 + y^2 + z^2 = 1$$. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.geometry.transformation import quaternion as quaternion_lib from tensorflow_graphics.geometry.transformation import rotation_matrix_3d from tensorflow_graphics.math import vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape def from_euler(angles, name=None): r"""Converts Euler angles to an axis-angle representation. Note: The conversion is performed by first converting to a quaternion representation, and then by converting the quaternion to an axis-angle. Note: In the following, A1 to An are optional batch dimensions. Args: angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the three Euler angles. `[A1, ..., An, 0]` is the angle about `x` in radians `[A1, ..., An, 1]` is the angle about `y` in radians and `[A1, ..., An, 2]` is the angle about `z` in radians. name: A name for this op that defaults to "axis_angle_from_euler". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. """ with tf.compat.v1.name_scope(name, "axis_angle_from_euler", [angles]): quaternion = quaternion_lib.from_euler(angles) return from_quaternion(quaternion) def from_euler_with_small_angles_approximation(angles, name=None): r"""Converts small Euler angles to an axis-angle representation. Under the small angle assumption, $$\sin(x)$$ and $$\cos(x)$$ can be approximated by their second order Taylor expansions, where $$\sin(x) \approx x$$ and $$\cos(x) \approx 1 - \frac{x^2}{2}$$. In the current implementation, the smallness of the angles is not verified. 
Note: The conversion is performed by first converting to a quaternion representation, and then by converting the quaternion to an axis-angle. Note: In the following, A1 to An are optional batch dimensions. Args: angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents the three small Euler angles. `[A1, ..., An, 0]` is the angle about `x` in radians `[A1, ..., An, 1]` is the angle about `y` in radians and `[A1, ..., An, 2]` is the angle about `z` in radians. name: A name for this op that defaults to "axis_angle_from_euler_with_small_angles_approximation". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. """ with tf.compat.v1.name_scope( name, "axis_angle_from_euler_with_small_angles_approximation", [angles]): quaternion = quaternion_lib.from_euler_with_small_angles_approximation( angles) return from_quaternion(quaternion) def from_quaternion(quaternion, name=None): """Converts a quaternion to an axis-angle representation. Note: In the following, A1 to An are optional batch dimensions. Args: quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension represents a normalized quaternion. name: A name for this op that defaults to "axis_angle_from_quaternion". Returns: Tuple of two tensors of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. Raises: ValueError: If the shape of `quaternion` is not supported. """ with tf.compat.v1.name_scope(name, "axis_angle_from_quaternion", [quaternion]): quaternion = tf.convert_to_tensor(value=quaternion) shape.check_static( tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4)) quaternion = asserts.assert_normalized(quaternion) # This prevents zero norm xyz and zero w, and is differentiable. quaternion += asserts.select_eps_for_addition(quaternion.dtype) xyz, w = tf.split(quaternion, (3, 1), axis=-1) norm = tf.norm(tensor=xyz, axis=-1, keepdims=True) angle = 2.0 * tf.atan2(norm, tf.abs(w)) axis = safe_ops.safe_unsigned_div(safe_ops.nonzero_sign(w) * xyz, norm) return axis, angle def from_rotation_matrix(rotation_matrix, name=None): """Converts a rotation matrix to an axis-angle representation. Note: In the current version the returned axis-angle representation is not unique for a given rotation matrix. Since a direct conversion would not really be faster, we first transform the rotation matrix to a quaternion, and finally perform the conversion from that quaternion to the corresponding axis-angle representation. Note: In the following, A1 to An are optional batch dimensions. Args: rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`, where the last two dimensions represent a rotation matrix. name: A name for this op that defaults to "axis_angle_from_rotation_matrix". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. Raises: ValueError: If the shape of `rotation_matrix` is not supported. 
""" with tf.compat.v1.name_scope(name, "axis_angle_from_rotation_matrix", [rotation_matrix]): rotation_matrix = tf.convert_to_tensor(value=rotation_matrix) shape.check_static( tensor=rotation_matrix, tensor_name="rotation_matrix", has_rank_greater_than=1, has_dim_equals=((-2, 3), (-1, 3))) rotation_matrix = rotation_matrix_3d.assert_rotation_matrix_normalized( rotation_matrix) quaternion = quaternion_lib.from_rotation_matrix(rotation_matrix) return from_quaternion(quaternion) def inverse(axis, angle, name=None): """Computes the axis-angle that is the inverse of the input axis-angle. Note: In the following, A1 to An are optional batch dimensions. Args: axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension represents an angle. name: A name for this op that defaults to "axis_angle_inverse". Returns: A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`, where the first tensor represents the axis, and the second represents the angle. The resulting axis is a normalized vector. Raises: ValueError: If the shape of `axis` or `angle` is not supported. """ with tf.compat.v1.name_scope(name, "axis_angle_inverse", [axis, angle]): axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(axis, angle), tensor_names=("axis", "angle"), last_axes=-2, broadcast_compatible=True) axis = asserts.assert_normalized(axis) return axis, -angle def is_normalized(axis, angle, atol=1e-3, name=None): """Determines if the axis-angle is normalized or not. Note: In the following, A1 to An are optional batch dimensions. Args: axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension represents an angle. atol: The absolute tolerance parameter. name: A name for this op that defaults to "axis_angle_is_normalized". Returns: A tensor of shape `[A1, ..., An, 1]`, where False indicates that the axis is not normalized. """ with tf.compat.v1.name_scope(name, "axis_angle_is_normalized", [axis, angle]): axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(axis, angle), tensor_names=("axis", "angle"), last_axes=-2, broadcast_compatible=True) norms = tf.norm(tensor=axis, axis=-1, keepdims=True) return tf.abs(norms - 1.) < atol def rotate(point, axis, angle, name=None): r"""Rotates a 3d point using an axis-angle by applying the Rodrigues' formula. Rotates a vector $$\mathbf{v} \in {\mathbb{R}^3}$$ into a vector $$\mathbf{v}' \in {\mathbb{R}^3}$$ using the Rodrigues' rotation formula: $$\mathbf{v}'=\mathbf{v}\cos(\theta)+(\mathbf{a}\times\mathbf{v})\sin(\theta) +\mathbf{a}(\mathbf{a}\cdot\mathbf{v})(1-\cos(\theta)).$$ Note: In the following, A1 to An are optional batch dimensions. Args: point: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a 3d point to rotate. axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a normalized axis. 
angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension represents an angle. name: A name for this op that defaults to "axis_angle_rotate". Returns: A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents a 3d point. Raises: ValueError: If `point`, `axis`, or `angle` are of different shape or if their respective shape is not supported. """ with tf.compat.v1.name_scope(name, "axis_angle_rotate", [point, axis, angle]): point = tf.convert_to_tensor(value=point) axis = tf.convert_to_tensor(value=axis) angle = tf.convert_to_tensor(value=angle) shape.check_static( tensor=point, tensor_name="point", has_dim_equals=(-1, 3)) shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3)) shape.check_static( tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1)) shape.compare_batch_dimensions( tensors=(point, axis, angle), tensor_names=("point", "axis", "angle"), last_axes=-2, broadcast_compatible=True) axis = asserts.assert_normalized(axis) cos_angle = tf.cos(angle) axis_dot_point = vector.dot(axis, point) return point * cos_angle + vector.cross( axis, point) * tf.sin(angle) + axis * axis_dot_point * (1.0 - cos_angle) # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
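To make the conversion and rotation APIs above concrete, here is a minimal usage sketch. It assumes the module is importable as `tensorflow_graphics.geometry.transformation.axis_angle` (its usual location in the library); the expected values follow directly from Rodrigues' formula.

import numpy as np
import tensorflow as tf
from tensorflow_graphics.geometry.transformation import axis_angle

# Rotate the x axis a quarter turn about z; Rodrigues' formula gives ~[0., 1., 0.].
point = tf.constant((1.0, 0.0, 0.0))
axis = tf.constant((0.0, 0.0, 1.0))  # rotate() asserts the axis is normalized.
angle = tf.constant((np.pi / 2.0,))
rotated = axis_angle.rotate(point, axis, angle)

# inverse() negates the angle, so rotating again restores the original point.
inverse_axis, inverse_angle = axis_angle.inverse(axis, angle)
restored = axis_angle.rotate(rotated, inverse_axis, inverse_angle)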
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/math/interpolation/tests/trilinear_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for trilinear interpolation.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.geometry.representation import grid from tensorflow_graphics.geometry.transformation import rotation_matrix_3d from tensorflow_graphics.math.interpolation import trilinear from tensorflow_graphics.util import test_case def _sampling_points_from_grid(grid_size, dtype=tf.float64): """Returns a tensor of shape `[M, 3]`, with M the number of sampling points.""" sampling_points = grid.generate((-1.0, -1.0, -1.0), (1.0, 1.0, 1.0), grid_size) sampling_points = tf.cast(sampling_points, dtype) return tf.reshape(sampling_points, [-1, 3]) def _transpose_last_two_dims(sampling_points): axes = [i for i in range(len(sampling_points.shape))] axes[-1], axes[-2] = axes[-2], axes[-1] sampling_points = tf.transpose(a=sampling_points, perm=axes) return sampling_points def _sampling_points_in_volume(sampling_points, voxel_size): """Transforms the sampling points from [-1, 1] to [0, voxel_size].""" voxel_size = tf.convert_to_tensor(value=voxel_size) max_size = tf.cast(voxel_size - 1, sampling_points.dtype) return 0.5 * ((sampling_points + 1) * max_size) ANGLE_90 = np.array((np.pi / 2.,)) def _get_random_voxel_grid(voxel_size): return np.random.uniform(size=voxel_size) def _get_random_sampling_points(sampling_points_size, max_grid_dim): random_grid = np.random.randint(0, max_grid_dim, size=sampling_points_size) return random_grid.astype(np.float64) def _generate_voxels_horizontal_plane(voxel_size): voxels = np.zeros(voxel_size) mid_x = int(np.floor(voxel_size[0] / 2)) voxels[mid_x, :, :, :] = 1 if voxel_size[0] % 2 == 0: voxels[mid_x - 1, :, :, :] = 1 return voxels def _generate_voxels_vertical_plane(voxel_size): voxels = np.zeros(voxel_size) mid_y = int(np.floor(voxel_size[1] / 2)) voxels[:, mid_y, :, :] = 1 if voxel_size[1] % 2 == 0: voxels[:, mid_y - 1, :, :] = 1 return voxels def _generate_voxel_cube(dims, plane_orientation=None): if plane_orientation == "horizontal": voxels_no_batch = _generate_voxels_horizontal_plane(dims[-4:]) elif plane_orientation == "vertical": voxels_no_batch = _generate_voxels_vertical_plane(dims[-4:]) else: voxels_no_batch = np.zeros(dims[-4:]) voxels = np.zeros(dims) voxels[..., :, :, :, :] = voxels_no_batch return voxels class TrilinearTest(test_case.TestCase): @parameterized.parameters( ("must have a rank greater than 3", ((5, 5, 5), (125, 3))), ("must have a rank greater than 1", ((2, 5, 5, 5, 1), (3,))), ("must have exactly 3 dimensions in axis -1", ((2, 5, 5, 5, 1), (2, 125, 4))), ("Not all batch dimensions are broadcast-compatible.", ((2, 2, 5, 5, 5, 1), (2, 3, 125, 3))), ) def test_interpolate_exception_raised(self, error_msg, shapes): """Tests whether exceptions are raised for incompatible shapes.""" self.assert_exception_is_raised( trilinear.interpolate, error_msg, shapes=shapes) @parameterized.parameters( ((5, 5, 5, 3), (125, 3)), ((2, 5, 5, 5, 3), (2, 
125, 3)), ((2, 2, 5, 5, 5, 3), (2, 2, 15, 3)), ) def test_interpolate_exception_not_raised(self, *shapes): """Tests whether exceptions are not raised for compatible shapes.""" self.assert_exception_is_not_raised(trilinear.interpolate, shapes) def test_interpolation_values_preset(self): voxels = np.zeros((2, 2, 2, 1)) voxels[(0, 1, 1, 0), (0, 0, 1, 1), (0, 0, 0, 0), 0] = 1 sampling_points = np.array(((0, 0, 0), (0.5, 0, 0), (1.0, 0, 0), (0., 0, 0.25), (0., 0, 0.5), (0., 0, 0.75), (0., 0, 1.0), (0., 0, 2.0), (-1.0, -0.5, 0), (0, 2, 1.5))) correct_values = np.array( ((1.0, 1.0, 1.0, 0.75, 0.5, 0.25, 0.0, 0.0, 1.0, 0.0),)).T self.assert_output_is_correct( trilinear.interpolate, (voxels, sampling_points), (correct_values,), tile=False) def test_interpolation_preset(self): """Tests whether interpolation results are correct.""" batch_dim_size = np.random.randint(0, 4) batch_dims = list(np.random.randint(1, 10, size=batch_dim_size)) cube_single_dim = np.random.randint(3, 10) cube_dims = [cube_single_dim, cube_single_dim, cube_single_dim] num_channels = [np.random.randint(1, 10)] combined_dims = batch_dims + cube_dims + num_channels voxels_in = _generate_voxel_cube(combined_dims, "horizontal") euler_angles = np.zeros(batch_dims + [3]) euler_angles[..., 2] = np.pi / 2. voxels_out = _generate_voxel_cube(combined_dims, "vertical") transformation_matrix = rotation_matrix_3d.from_euler(euler_angles) grid_size = (cube_single_dim, cube_single_dim, cube_single_dim) sampling_points = _sampling_points_from_grid(grid_size) sampling_points = tf.matmul(transformation_matrix, tf.transpose(a=sampling_points)) sampling_points = _transpose_last_two_dims(sampling_points) sampling_points = _sampling_points_in_volume(sampling_points, voxels_in.shape[-4:-1]) voxels_out = tf.reshape(voxels_out, batch_dims + [cube_single_dim**3] + num_channels) self.assert_output_is_correct( trilinear.interpolate, (voxels_in, sampling_points), (voxels_out,), tile=False) @parameterized.parameters( (1, 4, 4, 4, 1), (2, 4, 4, 4, 3), (3, 4, 4, 4, 3), ) def test_interpolate_jacobian_random(self, bsize, height, width, depth, channels): """Tests whether jacobian is correct.""" grid_3d_np = np.random.uniform(size=(bsize, height, width, depth, channels)) sampling_points_np = np.zeros((bsize, height * width * depth, 3)) sampling_points_np[:, :, 0] = np.arange(0, height * width * depth) self.assert_jacobian_is_correct_fn( lambda grid_3d: trilinear.interpolate(grid_3d, sampling_points_np), [grid_3d_np]) if __name__ == "__main__": test_case.main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for trilinear interpolation.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.geometry.representation import grid from tensorflow_graphics.geometry.transformation import rotation_matrix_3d from tensorflow_graphics.math.interpolation import trilinear from tensorflow_graphics.util import test_case def _sampling_points_from_grid(grid_size, dtype=tf.float64): """Returns a tensor of shape `[M, 3]`, with M the number of sampling points.""" sampling_points = grid.generate((-1.0, -1.0, -1.0), (1.0, 1.0, 1.0), grid_size) sampling_points = tf.cast(sampling_points, dtype) return tf.reshape(sampling_points, [-1, 3]) def _transpose_last_two_dims(sampling_points): axes = [i for i in range(len(sampling_points.shape))] axes[-1], axes[-2] = axes[-2], axes[-1] sampling_points = tf.transpose(a=sampling_points, perm=axes) return sampling_points def _sampling_points_in_volume(sampling_points, voxel_size): """Transforms the sampling points from [-1, 1] to [0, voxel_size].""" voxel_size = tf.convert_to_tensor(value=voxel_size) max_size = tf.cast(voxel_size - 1, sampling_points.dtype) return 0.5 * ((sampling_points + 1) * max_size) ANGLE_90 = np.array((np.pi / 2.,)) def _get_random_voxel_grid(voxel_size): return np.random.uniform(size=voxel_size) def _get_random_sampling_points(sampling_points_size, max_grid_dim): random_grid = np.random.randint(0, max_grid_dim, size=sampling_points_size) return random_grid.astype(np.float64) def _generate_voxels_horizontal_plane(voxel_size): voxels = np.zeros(voxel_size) mid_x = int(np.floor(voxel_size[0] / 2)) voxels[mid_x, :, :, :] = 1 if voxel_size[0] % 2 == 0: voxels[mid_x - 1, :, :, :] = 1 return voxels def _generate_voxels_vertical_plane(voxel_size): voxels = np.zeros(voxel_size) mid_y = int(np.floor(voxel_size[1] / 2)) voxels[:, mid_y, :, :] = 1 if voxel_size[1] % 2 == 0: voxels[:, mid_y - 1, :, :] = 1 return voxels def _generate_voxel_cube(dims, plane_orientation=None): if plane_orientation == "horizontal": voxels_no_batch = _generate_voxels_horizontal_plane(dims[-4:]) elif plane_orientation == "vertical": voxels_no_batch = _generate_voxels_vertical_plane(dims[-4:]) else: voxels_no_batch = np.zeros(dims[-4:]) voxels = np.zeros(dims) voxels[..., :, :, :, :] = voxels_no_batch return voxels class TrilinearTest(test_case.TestCase): @parameterized.parameters( ("must have a rank greater than 3", ((5, 5, 5), (125, 3))), ("must have a rank greater than 1", ((2, 5, 5, 5, 1), (3,))), ("must have exactly 3 dimensions in axis -1", ((2, 5, 5, 5, 1), (2, 125, 4))), ("Not all batch dimensions are broadcast-compatible.", ((2, 2, 5, 5, 5, 1), (2, 3, 125, 3))), ) def test_interpolate_exception_raised(self, error_msg, shapes): """Tests whether exceptions are raised for incompatible shapes.""" self.assert_exception_is_raised( trilinear.interpolate, error_msg, shapes=shapes) @parameterized.parameters( ((5, 5, 5, 3), (125, 3)), ((2, 5, 5, 5, 3), (2, 
125, 3)), ((2, 2, 5, 5, 5, 3), (2, 2, 15, 3)), ) def test_interpolate_exception_not_raised(self, *shapes): """Tests whether exceptions are not raised for compatible shapes.""" self.assert_exception_is_not_raised(trilinear.interpolate, shapes) def test_interpolation_values_preset(self): voxels = np.zeros((2, 2, 2, 1)) voxels[(0, 1, 1, 0), (0, 0, 1, 1), (0, 0, 0, 0), 0] = 1 sampling_points = np.array(((0, 0, 0), (0.5, 0, 0), (1.0, 0, 0), (0., 0, 0.25), (0., 0, 0.5), (0., 0, 0.75), (0., 0, 1.0), (0., 0, 2.0), (-1.0, -0.5, 0), (0, 2, 1.5))) correct_values = np.array( ((1.0, 1.0, 1.0, 0.75, 0.5, 0.25, 0.0, 0.0, 1.0, 0.0),)).T self.assert_output_is_correct( trilinear.interpolate, (voxels, sampling_points), (correct_values,), tile=False) def test_interpolation_preset(self): """Tests whether interpolation results are correct.""" batch_dim_size = np.random.randint(0, 4) batch_dims = list(np.random.randint(1, 10, size=batch_dim_size)) cube_single_dim = np.random.randint(3, 10) cube_dims = [cube_single_dim, cube_single_dim, cube_single_dim] num_channels = [np.random.randint(1, 10)] combined_dims = batch_dims + cube_dims + num_channels voxels_in = _generate_voxel_cube(combined_dims, "horizontal") euler_angles = np.zeros(batch_dims + [3]) euler_angles[..., 2] = np.pi / 2. voxels_out = _generate_voxel_cube(combined_dims, "vertical") transformation_matrix = rotation_matrix_3d.from_euler(euler_angles) grid_size = (cube_single_dim, cube_single_dim, cube_single_dim) sampling_points = _sampling_points_from_grid(grid_size) sampling_points = tf.matmul(transformation_matrix, tf.transpose(a=sampling_points)) sampling_points = _transpose_last_two_dims(sampling_points) sampling_points = _sampling_points_in_volume(sampling_points, voxels_in.shape[-4:-1]) voxels_out = tf.reshape(voxels_out, batch_dims + [cube_single_dim**3] + num_channels) self.assert_output_is_correct( trilinear.interpolate, (voxels_in, sampling_points), (voxels_out,), tile=False) @parameterized.parameters( (1, 4, 4, 4, 1), (2, 4, 4, 4, 3), (3, 4, 4, 4, 3), ) def test_interpolate_jacobian_random(self, bsize, height, width, depth, channels): """Tests whether jacobian is correct.""" grid_3d_np = np.random.uniform(size=(bsize, height, width, depth, channels)) sampling_points_np = np.zeros((bsize, height * width * depth, 3)) sampling_points_np[:, :, 0] = np.arange(0, height * width * depth) self.assert_jacobian_is_correct_fn( lambda grid_3d: trilinear.interpolate(grid_3d, sampling_points_np), [grid_3d_np]) if __name__ == "__main__": test_case.main()
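As a concrete reading of the preset-values test above, the following minimal sketch (illustrative, not part of the test file) fills one face of a 2x2x2 grid with ones and samples halfway toward the opposite face; trilinear interpolation blends the two faces to 0.5.

import numpy as np
from tensorflow_graphics.math.interpolation import trilinear

# Ones on the face where the third index is 0, zeros on the opposite face.
voxels = np.zeros((2, 2, 2, 1))
voxels[:, :, 0, 0] = 1.0
# Halfway along the third axis the two faces blend linearly: expect 0.5.
sampling_points = np.array(((0.0, 0.0, 0.5),))
values = trilinear.interpolate(voxels, sampling_points)  # shape [1, 1]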
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/image/tests/matting_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for matting.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.image import matting from tensorflow_graphics.util import asserts from tensorflow_graphics.util import shape from tensorflow_graphics.util import test_case def _laplacian_matrix(image, size=3, eps=1e-5, name=None): """Generates the closed form matting Laplacian matrices. Generates the closed form matting Laplacian as proposed by Levin et al. in "A Closed Form Solution to Natural Image Matting". Args: image: A tensor of shape `[B, H, W, C]`. size: An `int` representing the size of the patches used to enforce smoothness. eps: A small number of type `float` to regularize the problem. name: A name for this op. Defaults to "matting_laplacian_matrix". Returns: A tensor of shape `[B, H, W, size^2, size^2]` containing the matting Laplacian matrices. Raises: ValueError: If `image` is not of rank 4. """ with tf.compat.v1.name_scope(name, "matting_laplacian_matrix", [image]): image = tf.convert_to_tensor(value=image) shape.check_static(image, has_rank=4) if size % 2 == 0: raise ValueError("The patch size is expected to be an odd value.") pixels = size**2 channels = tf.shape(input=image)[-1] dtype = image.dtype patches = tf.image.extract_patches( image, sizes=(1, size, size, 1), strides=(1, 1, 1, 1), rates=(1, 1, 1, 1), padding="VALID") batches = tf.shape(input=patches)[:-1] new_shape = tf.concat((batches, (pixels, channels)), axis=-1) patches = tf.reshape(patches, shape=new_shape) mean = tf.reduce_mean(input_tensor=patches, axis=-2, keepdims=True) demean = patches - mean covariance = tf.matmul(demean, demean, transpose_a=True) / pixels regularizer = (eps / pixels) * tf.eye(channels, dtype=dtype) covariance_inv = tf.linalg.inv(covariance + regularizer) covariance_inv = asserts.assert_no_infs_or_nans(covariance_inv) mat = tf.matmul(tf.matmul(demean, covariance_inv), demean, transpose_b=True) return tf.eye(pixels, dtype=dtype) - (1.0 + mat) / pixels class MattingTest(test_case.TestCase): @parameterized.parameters((3, 1), (3, 3), (5, 3), (5, 1)) def test_build_matrices_jacobian_random(self, size, channels): """Tests the Jacobian of the build_matrices function.""" tensor_shape = np.random.randint(size, 6, size=3) image_init = np.random.uniform( 0.0, 1.0, size=tensor_shape.tolist() + [channels]) with self.subTest(name="laplacian"): self.assert_jacobian_is_correct_fn( lambda image: matting.build_matrices(image, size=size)[0], [image_init]) with self.subTest(name="pseudo_inverse"): self.assert_jacobian_is_correct_fn( lambda image: matting.build_matrices(image, size=size)[1], [image_init]) @parameterized.parameters((3, 1), (3, 3), (5, 3), (5, 1)) def test_build_matrices_laplacian_zero_rows_and_columns(self, size, channels): """Tests that the laplacian matrix rows and columns sum to zero.""" tensor_shape = np.random.randint(size, 6, size=3) image_init = np.random.uniform( 0.0, 1.0, size=tensor_shape.tolist() 
+ [channels]) image = tf.convert_to_tensor(value=image_init) laplacian, _ = matting.build_matrices(image, size=size) rows = tf.reduce_sum(input_tensor=laplacian, axis=-2) columns = tf.reduce_sum(input_tensor=laplacian, axis=-1) with self.subTest(name="rows"): self.assertAllClose(rows, tf.zeros_like(rows)) with self.subTest(name="columns"): self.assertAllClose(columns, tf.zeros_like(columns)) @parameterized.parameters((3, 1), (3, 3), (5, 3), (5, 1)) def test_build_matrices_laplacian_versions(self, size, channels): """Compares two ways of computing the laplacian matrix.""" tensor_shape = np.random.randint(size, 6, size=3) image_init = np.random.uniform( 0.0, 1.0, size=tensor_shape.tolist() + [channels]) image = tf.convert_to_tensor(value=image_init) laplacian_v1, _ = matting.build_matrices(image, size=size) laplacian_v2 = _laplacian_matrix(image, size=size) self.assertAllClose(laplacian_v1, laplacian_v2) @parameterized.parameters( (3, (None, None, None, 1)), (3, (None, None, None, 3)), (5, (None, None, None, 1)), (5, (None, None, None, 3)), (3, (1, 3, 3, 1)), (3, (1, 3, 3, 3)), (5, (1, 5, 5, 1)), (5, (1, 5, 5, 3)), ) def test_build_matrices_not_raised(self, size, *shapes): """Tests that the shape exceptions are not raised.""" build_matrices = lambda image: matting.build_matrices(image, size=size) self.assert_exception_is_not_raised(build_matrices, shapes) @parameterized.parameters( ("tensor must have a rank of 4, but it has rank", 3, (1,)), ("tensor must have a rank of 4, but it has rank", 3, (1, 1, 1, 1, 1)), ("The patch size is expected to be an odd value.", 2, (1, 1, 1, 1)), ) def test_build_matrices_raised(self, error_msg, size, *shapes): """Tests that the shape exceptions are properly raised.""" build_matrices = lambda image: matting.build_matrices(image, size=size) self.assert_exception_is_raised(build_matrices, error_msg, shapes) @parameterized.parameters((3,), (5,)) def test_linear_coefficients_jacobian_random(self, size): """Tests the Jacobian of the linear_coefficients function.""" tensor_shape = np.random.randint(size, 6, size=3) matte_init = np.random.uniform(0.0, 1.0, size=tensor_shape.tolist() + [1]) tensor_shape[1:3] -= (size - 1) num_coeffs = np.random.randint(2, 4) pseudo_inverse_init = np.random.uniform( 0.0, 1.0, size=tensor_shape.tolist() + [num_coeffs, size**2]) def a_fn(matte, pseudo_inverse): a, _ = matting.linear_coefficients(matte, pseudo_inverse) return a def b_fn(matte, pseudo_inverse): _, b = matting.linear_coefficients(matte, pseudo_inverse) return b with self.subTest(name="a"): self.assert_jacobian_is_correct_fn(a_fn, [matte_init, pseudo_inverse_init]) with self.subTest(name="b"): self.assert_jacobian_is_correct_fn(b_fn, [matte_init, pseudo_inverse_init]) @parameterized.parameters( ((None, None, None, 1), (None, None, None, 4, 9)), ((None, None, None, 1), (None, None, None, 2, 25)), ((1, 6, 6, 1), (1, 4, 4, 2, 9)), ((1, 10, 10, 1), (1, 6, 6, 2, 25)), ) def test_linear_coefficients_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(matting.linear_coefficients, shapes) @parameterized.parameters( ("must have exactly 1 dimensions in axis -1", (1, 6, 6, 2), (1, 4, 4, 2, 9)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (2, 4, 4, 2, 9)), ) def test_linear_coefficients_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(matting.linear_coefficients, error_msg, shapes) @parameterized.parameters((3,), (5,)) def 
test_linear_coefficients_reconstruction_same_images(self, size): """Tests that the matte can be reconstructed by using the coefficients.""" tensor_shape = np.random.randint(size, 6, size=3).tolist() image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1]) _, pseudo_inverse = matting.build_matrices(image, size=size) a, b = matting.linear_coefficients(image, pseudo_inverse) reconstructed = matting.reconstruct(image, a, b) self.assertAllClose(image, reconstructed, atol=1e-4) @parameterized.parameters((3,), (5,)) def test_linear_coefficients_reconstruction_opposite_images(self, size): """Tests that the matte can be reconstructed by using the coefficients.""" tensor_shape = np.random.randint(size, 6, size=3).tolist() image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1]) _, pseudo_inverse = matting.build_matrices(image, size=size) a, b = matting.linear_coefficients(1.0 - image, pseudo_inverse) reconstructed = matting.reconstruct(image, a, b) self.assertAllClose(1.0 - image, reconstructed, atol=1e-4) @parameterized.parameters((3,), (5,)) def test_loss_jacobian_random(self, size): """Tests the Jacobian of the matting loss function.""" tensor_shape = np.random.randint(size, 6, size=3) matte_init = np.random.uniform(0.0, 1.0, size=tensor_shape.tolist() + [1]) tensor_shape[1:3] -= (size - 1) laplacian_init = np.random.uniform( 0.0, 1.0, size=tensor_shape.tolist() + [size**2, size**2]) with self.subTest(name="matte"): self.assert_jacobian_is_correct_fn(matting.loss, [matte_init, laplacian_init]) @parameterized.parameters( ((None, None, None, 1), (None, None, None, 9, 9)), ((None, None, None, 1), (None, None, None, 25, 25)), ((1, 6, 6, 1), (1, 4, 4, 9, 9)), ((1, 10, 10, 1), (1, 6, 6, 25, 25)), ) def test_loss_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(matting.loss, shapes) @parameterized.parameters( ("must have exactly 1 dimensions in axis -1", (1, 6, 6, 2), (1, 4, 4, 9, 9)), ("must have exactly 9 dimensions in axis -2", (1, 6, 6, 1), (1, 4, 4, 1, 9)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (2, 4, 4, 9, 9)), ) def test_loss_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(matting.loss, error_msg, shapes) @parameterized.parameters((3,), (5,)) def test_loss_opposite_images(self, size): """Tests that passing opposite images results in a loss close to 0.0.""" tensor_shape = np.random.randint(size, 6, size=3).tolist() image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1]) laplacian, _ = matting.build_matrices(image, size=size) loss = matting.loss(1.0 - image, laplacian) self.assertAllClose(loss, 0.0, atol=1e-4) @parameterized.parameters((3,), (5,)) def test_loss_same_images(self, size): """Tests that passing the same images results in a loss close to 0.0.""" tensor_shape = np.random.randint(size, 6, size=3).tolist() image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1]) laplacian, _ = matting.build_matrices(image, size=size) loss = matting.loss(image, laplacian) self.assertAllClose(loss, 0.0, atol=1e-4) @parameterized.parameters((3,), (5,)) def test_loss_positive(self, size): """Tests that the loss is always greater than or equal to 0.0.""" tensor_shape = np.random.randint(size, 6, size=3).tolist() image = tf.random.uniform(minval=0.0, maxval=1.0, shape=tensor_shape + [3]) matte = tf.random.uniform(minval=0.0, maxval=1.0, shape=tensor_shape + [1]) laplacian, _ = matting.build_matrices(image, size=size) loss =
matting.loss(matte, laplacian) self.assertAllGreaterEqual(loss, 0.0) @parameterized.parameters((1,), (3,)) def test_reconstruct_jacobian_random(self, channels): """Tests the Jacobian of the reconstruct function.""" tensor_shape = np.random.randint(1, 5, size=3).tolist() image_init = np.random.uniform(0.0, 1.0, size=tensor_shape + [channels]) mul_init = np.random.uniform(0.0, 1.0, size=tensor_shape + [channels]) add_init = np.random.uniform(0.0, 1.0, size=tensor_shape + [1]) self.assert_jacobian_is_correct_fn(matting.reconstruct, [image_init, mul_init, add_init]) @parameterized.parameters( ((None, None, None, 3), (None, None, None, 3), (None, None, None, 1)), ((1, 6, 6, 3), (1, 6, 6, 3), (1, 6, 6, 1)), ) def test_reconstruct_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(matting.reconstruct, shapes) @parameterized.parameters( ("tensor must have a rank of 4, but it has rank", (1, 6, 6), (1, 6, 6, 2), (1, 6, 6, 1)), ("tensor must have a rank of 4, but it has rank", (1, 6, 6, 2), (1, 6, 6), (1, 6, 6, 1)), ("tensor must have a rank of 4, but it has rank", (1, 6, 6, 2), (1, 6, 6, 2), (1, 6, 6)), ("must have exactly 1 dimensions in axis -1", (1, 6, 6, 2), (1, 6, 6, 2), (1, 6, 6, 2)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 6, 4), (1, 6, 6, 1)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 4, 6, 1), (1, 6, 6, 1)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 6, 1), (1, 4, 6, 1)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 4, 1), (1, 6, 6, 1)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 6, 1), (1, 6, 4, 1)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (4, 6, 6, 1), (1, 6, 6, 1)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 6, 1), (4, 6, 6, 1)), ) def test_reconstruct_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(matting.reconstruct, error_msg, shapes) if __name__ == "__main__": test_case.main()
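The loss tests above reduce to the following behavior; this is a minimal sketch mirroring `test_loss_same_images`, not additional test code.

import numpy as np
from tensorflow_graphics.image import matting

# A matte identical to the image it was built from incurs (almost) no loss.
image = np.random.uniform(0.0, 1.0, size=(1, 6, 6, 1))
laplacian, _ = matting.build_matrices(image, size=3)
loss = matting.loss(image, laplacian)  # close to 0.0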
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for matting.""" from absl.testing import parameterized import numpy as np import tensorflow as tf from tensorflow_graphics.image import matting from tensorflow_graphics.util import asserts from tensorflow_graphics.util import shape from tensorflow_graphics.util import test_case def _laplacian_matrix(image, size=3, eps=1e-5, name=None): """Generates the closed form matting Laplacian matrices. Generates the closed form matting Laplacian as proposed by Levin et al. in "A Closed Form Solution to Natural Image Matting". Args: image: A tensor of shape `[B, H, W, C]`. size: An `int` representing the size of the patches used to enforce smoothness. eps: A small number of type `float` to regularize the problem. name: A name for this op. Defaults to "matting_laplacian_matrix". Returns: A tensor of shape `[B, H, W, size^2, size^2]` containing the matting Laplacian matrices. Raises: ValueError: If `image` is not of rank 4. """ with tf.compat.v1.name_scope(name, "matting_laplacian_matrix", [image]): image = tf.convert_to_tensor(value=image) shape.check_static(image, has_rank=4) if size % 2 == 0: raise ValueError("The patch size is expected to be an odd value.") pixels = size**2 channels = tf.shape(input=image)[-1] dtype = image.dtype patches = tf.image.extract_patches( image, sizes=(1, size, size, 1), strides=(1, 1, 1, 1), rates=(1, 1, 1, 1), padding="VALID") batches = tf.shape(input=patches)[:-1] new_shape = tf.concat((batches, (pixels, channels)), axis=-1) patches = tf.reshape(patches, shape=new_shape) mean = tf.reduce_mean(input_tensor=patches, axis=-2, keepdims=True) demean = patches - mean covariance = tf.matmul(demean, demean, transpose_a=True) / pixels regularizer = (eps / pixels) * tf.eye(channels, dtype=dtype) covariance_inv = tf.linalg.inv(covariance + regularizer) covariance_inv = asserts.assert_no_infs_or_nans(covariance_inv) mat = tf.matmul(tf.matmul(demean, covariance_inv), demean, transpose_b=True) return tf.eye(pixels, dtype=dtype) - (1.0 + mat) / pixels class MattingTest(test_case.TestCase): @parameterized.parameters((3, 1), (3, 3), (5, 3), (5, 1)) def test_build_matrices_jacobian_random(self, size, channels): """Tests the Jacobian of the build_matrices function.""" tensor_shape = np.random.randint(size, 6, size=3) image_init = np.random.uniform( 0.0, 1.0, size=tensor_shape.tolist() + [channels]) with self.subTest(name="laplacian"): self.assert_jacobian_is_correct_fn( lambda image: matting.build_matrices(image, size=size)[0], [image_init]) with self.subTest(name="pseudo_inverse"): self.assert_jacobian_is_correct_fn( lambda image: matting.build_matrices(image, size=size)[1], [image_init]) @parameterized.parameters((3, 1), (3, 3), (5, 3), (5, 1)) def test_build_matrices_laplacian_zero_rows_and_columns(self, size, channels): """Tests that the laplacian matrix rows and columns sum to zero.""" tensor_shape = np.random.randint(size, 6, size=3) image_init = np.random.uniform( 0.0, 1.0, size=tensor_shape.tolist() 
+ [channels]) image = tf.convert_to_tensor(value=image_init) laplacian, _ = matting.build_matrices(image, size=size) rows = tf.reduce_sum(input_tensor=laplacian, axis=-2) columns = tf.reduce_sum(input_tensor=laplacian, axis=-1) with self.subTest(name="rows"): self.assertAllClose(rows, tf.zeros_like(rows)) with self.subTest(name="columns"): self.assertAllClose(columns, tf.zeros_like(columns)) @parameterized.parameters((3, 1), (3, 3), (5, 3), (5, 1)) def test_build_matrices_laplacian_versions(self, size, channels): """Compares two ways of computing the laplacian matrix.""" tensor_shape = np.random.randint(size, 6, size=3) image_init = np.random.uniform( 0.0, 1.0, size=tensor_shape.tolist() + [channels]) image = tf.convert_to_tensor(value=image_init) laplacian_v1, _ = matting.build_matrices(image, size=size) laplacian_v2 = _laplacian_matrix(image, size=size) self.assertAllClose(laplacian_v1, laplacian_v2) @parameterized.parameters( (3, (None, None, None, 1)), (3, (None, None, None, 3)), (5, (None, None, None, 1)), (5, (None, None, None, 3)), (3, (1, 3, 3, 1)), (3, (1, 3, 3, 3)), (5, (1, 5, 5, 1)), (5, (1, 5, 5, 3)), ) def test_build_matrices_not_raised(self, size, *shapes): """Tests that the shape exceptions are not raised.""" build_matrices = lambda image: matting.build_matrices(image, size=size) self.assert_exception_is_not_raised(build_matrices, shapes) @parameterized.parameters( ("tensor must have a rank of 4, but it has rank", 3, (1,)), ("tensor must have a rank of 4, but it has rank", 3, (1, 1, 1, 1, 1)), ("The patch size is expected to be an odd value.", 2, (1, 1, 1, 1)), ) def test_build_matrices_raised(self, error_msg, size, *shapes): """Tests that the shape exceptions are properly raised.""" build_matrices = lambda image: matting.build_matrices(image, size=size) self.assert_exception_is_raised(build_matrices, error_msg, shapes) @parameterized.parameters((3,), (5,)) def test_linear_coefficients_jacobian_random(self, size): """Tests the Jacobian of the linear_coefficients function.""" tensor_shape = np.random.randint(size, 6, size=3) matte_init = np.random.uniform(0.0, 1.0, size=tensor_shape.tolist() + [1]) tensor_shape[1:3] -= (size - 1) num_coeffs = np.random.randint(2, 4) pseudo_inverse_init = np.random.uniform( 0.0, 1.0, size=tensor_shape.tolist() + [num_coeffs, size**2]) def a_fn(matte, pseudo_inverse): a, _ = matting.linear_coefficients(matte, pseudo_inverse) return a def b_fn(matte, pseudo_inverse): _, b = matting.linear_coefficients(matte, pseudo_inverse) return b with self.subTest(name="a"): self.assert_jacobian_is_correct_fn(a_fn, [matte_init, pseudo_inverse_init]) with self.subTest(name="b"): self.assert_jacobian_is_correct_fn(b_fn, [matte_init, pseudo_inverse_init]) @parameterized.parameters( ((None, None, None, 1), (None, None, None, 4, 9)), ((None, None, None, 1), (None, None, None, 2, 25)), ((1, 6, 6, 1), (1, 4, 4, 2, 9)), ((1, 10, 10, 1), (1, 6, 6, 2, 25)), ) def test_linear_coefficients_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(matting.linear_coefficients, shapes) @parameterized.parameters( ("must have exactly 1 dimensions in axis -1", (1, 6, 6, 2), (1, 4, 4, 2, 9)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (2, 4, 4, 2, 9)), ) def test_linear_coefficients_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(matting.linear_coefficients, error_msg, shapes) @parameterized.parameters((3,), (5,)) def 
test_linear_coefficients_reconstruction_same_images(self, size): """Tests that the matte can be reconstructed by using the coefficients.""" tensor_shape = np.random.randint(size, 6, size=3).tolist() image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1]) _, pseudo_inverse = matting.build_matrices(image, size=size) a, b = matting.linear_coefficients(image, pseudo_inverse) reconstructed = matting.reconstruct(image, a, b) self.assertAllClose(image, reconstructed, atol=1e-4) @parameterized.parameters((3,), (5,)) def test_linear_coefficients_reconstruction_opposite_images(self, size): """Tests that the matte can be reconstructed by using the coefficients.""" tensor_shape = np.random.randint(size, 6, size=3).tolist() image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1]) _, pseudo_inverse = matting.build_matrices(image, size=size) a, b = matting.linear_coefficients(1.0 - image, pseudo_inverse) reconstructed = matting.reconstruct(image, a, b) self.assertAllClose(1.0 - image, reconstructed, atol=1e-4) @parameterized.parameters((3,), (5,)) def test_loss_jacobian_random(self, size): """Tests the Jacobian of the matting loss function.""" tensor_shape = np.random.randint(size, 6, size=3) matte_init = np.random.uniform(0.0, 1.0, size=tensor_shape.tolist() + [1]) tensor_shape[1:3] -= (size - 1) laplacian_init = np.random.uniform( 0.0, 1.0, size=tensor_shape.tolist() + [size**2, size**2]) with self.subTest(name="matte"): self.assert_jacobian_is_correct_fn(matting.loss, [matte_init, laplacian_init]) @parameterized.parameters( ((None, None, None, 1), (None, None, None, 9, 9)), ((None, None, None, 1), (None, None, None, 25, 25)), ((1, 6, 6, 1), (1, 4, 4, 9, 9)), ((1, 10, 10, 1), (1, 6, 6, 25, 25)), ) def test_loss_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(matting.loss, shapes) @parameterized.parameters( ("must have exactly 1 dimensions in axis -1", (1, 6, 6, 2), (1, 4, 4, 9, 9)), ("must have exactly 9 dimensions in axis -2", (1, 6, 6, 1), (1, 4, 4, 1, 9)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (2, 4, 4, 9, 9)), ) def test_loss_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(matting.loss, error_msg, shapes) @parameterized.parameters((3,), (5,)) def test_loss_opposite_images(self, size): """Tests that passing opposite images results in a loss close to 0.0.""" tensor_shape = np.random.randint(size, 6, size=3).tolist() image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1]) laplacian, _ = matting.build_matrices(image, size=size) loss = matting.loss(1.0 - image, laplacian) self.assertAllClose(loss, 0.0, atol=1e-4) @parameterized.parameters((3,), (5,)) def test_loss_same_images(self, size): """Tests that passing the same images results in a loss close to 0.0.""" tensor_shape = np.random.randint(size, 6, size=3).tolist() image = np.random.uniform(0.0, 1.0, size=tensor_shape + [1]) laplacian, _ = matting.build_matrices(image, size=size) loss = matting.loss(image, laplacian) self.assertAllClose(loss, 0.0, atol=1e-4) @parameterized.parameters((3,), (5,)) def test_loss_positive(self, size): """Tests that the loss is always greater than or equal to 0.0.""" tensor_shape = np.random.randint(size, 6, size=3).tolist() image = tf.random.uniform(minval=0.0, maxval=1.0, shape=tensor_shape + [3]) matte = tf.random.uniform(minval=0.0, maxval=1.0, shape=tensor_shape + [1]) laplacian, _ = matting.build_matrices(image, size=size) loss =
matting.loss(matte, laplacian) self.assertAllGreaterEqual(loss, 0.0) @parameterized.parameters((1,), (3,)) def test_reconstruct_jacobian_random(self, channels): """Tests the Jacobian of the reconstruct function.""" tensor_shape = np.random.randint(1, 5, size=3).tolist() image_init = np.random.uniform(0.0, 1.0, size=tensor_shape + [channels]) mul_init = np.random.uniform(0.0, 1.0, size=tensor_shape + [channels]) add_init = np.random.uniform(0.0, 1.0, size=tensor_shape + [1]) self.assert_jacobian_is_correct_fn(matting.reconstruct, [image_init, mul_init, add_init]) @parameterized.parameters( ((None, None, None, 3), (None, None, None, 3), (None, None, None, 1)), ((1, 6, 6, 3), (1, 6, 6, 3), (1, 6, 6, 1)), ) def test_reconstruct_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(matting.reconstruct, shapes) @parameterized.parameters( ("tensor must have a rank of 4, but it has rank", (1, 6, 6), (1, 6, 6, 2), (1, 6, 6, 1)), ("tensor must have a rank of 4, but it has rank", (1, 6, 6, 2), (1, 6, 6), (1, 6, 6, 1)), ("tensor must have a rank of 4, but it has rank", (1, 6, 6, 2), (1, 6, 6, 2), (1, 6, 6)), ("must have exactly 1 dimensions in axis -1", (1, 6, 6, 2), (1, 6, 6, 2), (1, 6, 6, 2)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 6, 4), (1, 6, 6, 1)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 4, 6, 1), (1, 6, 6, 1)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 6, 1), (1, 4, 6, 1)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 4, 1), (1, 6, 6, 1)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 6, 1), (1, 6, 4, 1)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (4, 6, 6, 1), (1, 6, 6, 1)), ("Not all batch dimensions are identical.", (1, 6, 6, 1), (1, 6, 6, 1), (4, 6, 6, 1)), ) def test_reconstruct_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(matting.reconstruct, error_msg, shapes) if __name__ == "__main__": test_case.main()
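The reconstruction tests express a simple round trip; here is a minimal sketch of it, mirroring `test_linear_coefficients_reconstruction_same_images`.

import numpy as np
from tensorflow_graphics.image import matting

# Fitting the linear coefficients to the matte itself reproduces the matte.
image = np.random.uniform(0.0, 1.0, size=(1, 6, 6, 1))
_, pseudo_inverse = matting.build_matrices(image, size=3)
a, b = matting.linear_coefficients(image, pseudo_inverse)
reconstructed = matting.reconstruct(image, a, b)  # ~image, atol 1e-4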
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/geometry/convolution/graph_convolution.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements various graph convolutions in TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.geometry.convolution import utils from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def feature_steered_convolution(data, neighbors, sizes, var_u, var_v, var_c, var_w, var_b, name=None): # pyformat: disable """Implements the Feature Steered graph convolution. FeaStNet: Feature-Steered Graph Convolutions for 3D Shape Analysis Nitika Verma, Edmond Boyer, Jakob Verbeek CVPR 2018 https://arxiv.org/abs/1706.05206 The shorthands used below are `V`: The number of vertices. `C`: The number of channels in the input data. `D`: The number of channels in the output after convolution. `W`: The number of weight matrices used in the convolution. The input variables (`var_u`, `var_v`, `var_c`, `var_w`, `var_b`) correspond to the variables with the same names in the paper cited above. Note: In the following, A1 to An are optional batch dimensions. Args: data: A `float` tensor with shape `[A1, ..., An, V, C]`. neighbors: A `SparseTensor` with the same type as `data` and with shape `[A1, ..., An, V, V]` representing vertex neighborhoods. The neighborhood of a vertex defines the support region for convolution. For a mesh, a common choice for the neighborhood of vertex i would be the vertices in the K-ring of i (including i itself). Each vertex must have at least one neighbor. For a faithful implementation of the FeaStNet convolution, neighbors should be a row-normalized weight matrix corresponding to the graph adjacency matrix with self-edges: `neighbors[A1, ..., An, i, j] > 0` if vertex j is a neighbor of i, and `neighbors[A1, ..., An, i, i] > 0` for all i, and `sum(neighbors, axis=-1)[A1, ..., An, i] == 1.0 for all i`. These requirements are relaxed in this implementation. sizes: An `int` tensor of shape `[A1, ..., An]` indicating the true input sizes in case of padding (`sizes=None` indicates no padding). Note that `sizes[A1, ..., An] <= V`. If `data` and `neighbors` are 2-D, `sizes` will be ignored. An example usage of `sizes`: consider an input consisting of three graphs G0, G1, and G2 with V0, V1, and V2 vertices respectively. The padded input would have the following shapes: `data.shape = [3, V, C]` and `neighbors.shape = [3, V, V]`, where `V = max([V0, V1, V2])`. The true sizes of each graph will be specified by `sizes=[V0, V1, V2]`; `data[i, :Vi, :]` and `neighbors[i, :Vi, :Vi]` will be the vertex and neighborhood data of graph Gi. The `SparseTensor` `neighbors` should have no nonzero entries in the padded regions. var_u: A 2-D tensor with shape `[C, W]`. var_v: A 2-D tensor with shape `[C, W]`. var_c: A 1-D tensor with shape `[W]`. var_w: A 3-D tensor with shape `[W, C, D]`. var_b: A 1-D tensor with shape `[D]`. name: A name for this op.
Defaults to `graph_convolution_feature_steered_convolution`. Returns: Tensor with shape `[A1, ..., An, V, D]`. Raises: TypeError: if the input types are invalid. ValueError: if the input dimensions are invalid. """ # pyformat: enable with tf.compat.v1.name_scope( name, "graph_convolution_feature_steered_convolution", [data, neighbors, sizes, var_u, var_v, var_c, var_w, var_b]): data = tf.convert_to_tensor(value=data) neighbors = tf.compat.v1.convert_to_tensor_or_sparse_tensor(value=neighbors) if sizes is not None: sizes = tf.convert_to_tensor(value=sizes) var_u = tf.convert_to_tensor(value=var_u) var_v = tf.convert_to_tensor(value=var_v) var_c = tf.convert_to_tensor(value=var_c) var_w = tf.convert_to_tensor(value=var_w) var_b = tf.convert_to_tensor(value=var_b) data_ndims = data.shape.ndims utils.check_valid_graph_convolution_input(data, neighbors, sizes) shape.compare_dimensions( tensors=(data, var_u, var_v, var_w), tensor_names=("data", "var_u", "var_v", "var_w"), axes=(-1, 0, 0, 1)) shape.compare_dimensions( tensors=(var_u, var_v, var_c, var_w), tensor_names=("var_u", "var_v", "var_c", "var_w"), axes=(1, 1, 0, 0)) shape.compare_dimensions( tensors=(var_w, var_b), tensor_names=("var_w", "var_b"), axes=-1) # Flatten the batch dimensions and remove any vertex padding. if data_ndims > 2: if sizes is not None: sizes_square = tf.stack((sizes, sizes), axis=-1) else: sizes_square = None x_flat, unflatten = utils.flatten_batch_to_2d(data, sizes) adjacency = utils.convert_to_block_diag_2d(neighbors, sizes_square) else: x_flat = data adjacency = neighbors x_u = tf.matmul(x_flat, var_u) x_v = tf.matmul(x_flat, var_v) adjacency_ind_0 = adjacency.indices[:, 0] adjacency_ind_1 = adjacency.indices[:, 1] x_u_rep = tf.gather(x_u, adjacency_ind_0) x_v_sep = tf.gather(x_v, adjacency_ind_1) weights_q = tf.exp(x_u_rep + x_v_sep + tf.reshape(var_c, (1, -1))) weights_q_sum = tf.reduce_sum( input_tensor=weights_q, axis=-1, keepdims=True) weights_q = weights_q / weights_q_sum y_i_m = [] x_sep = tf.gather(x_flat, adjacency_ind_1) q_m_list = tf.unstack(weights_q, axis=-1) w_m_list = tf.unstack(var_w, axis=0) x_flat_shape = tf.shape(input=x_flat) for q_m, w_m in zip(q_m_list, w_m_list): # Compute `y_i_m = sum_{j in neighborhood(i)} q_m(x_i, x_j) * w_m * x_j`. q_m = tf.expand_dims(q_m, axis=-1) p_sum = tf.math.unsorted_segment_sum( data=(q_m * x_sep) * tf.expand_dims(adjacency.values, -1), segment_ids=adjacency_ind_0, num_segments=x_flat_shape[0]) y_i_m.append(tf.matmul(p_sum, w_m)) y_out = tf.add_n(inputs=y_i_m) + tf.reshape(var_b, [1, -1]) if data_ndims > 2: y_out = unflatten(y_out) return y_out def edge_convolution_template(data, neighbors, sizes, edge_function, reduction, edge_function_kwargs, name=None): # pyformat: disable r"""A template for edge convolutions. This function implements a general edge convolution for graphs of the form \\(y_i = \sum_{j \in \mathcal{N}(i)} w_{ij} f(x_i, x_j)\\), where \\(\mathcal{N}(i)\\) is the set of vertices in the neighborhood of vertex \\(i\\), \\(x_i \in \mathbb{R}^C\\) are the features at vertex \\(i\\), \\(w_{ij} \in \mathbb{R}\\) is the weight for the edge between vertex \\(i\\) and vertex \\(j\\), and finally \\(f(x_i, x_j): \mathbb{R}^{C} \times \mathbb{R}^{C} \to \mathbb{R}^{D}\\) is a user-supplied function. This template also implements the same general edge convolution described above with a max-reduction instead of a weighted sum. 
An example of how this template can be used is for Laplacian smoothing, which is defined as $$y_i = \frac{1}{|\mathcal{N(i)}|} \sum_{j \in \mathcal{N(i)}} x_j$$. `edge_convolution_template` can be used to perform Laplacian smoothing by setting $$w_{ij} = \frac{1}{|\mathcal{N(i)}|}$$, `edge_function=lambda x, y: y`, and `reduction='weighted'`. The shorthands used below are `V`: The number of vertices. `C`: The number of channels in the input data. Note: In the following, A1 to An are optional batch dimensions. Args: data: A `float` tensor with shape `[A1, ..., An, V, C]`. neighbors: A `SparseTensor` with the same type as `data` and with shape `[A1, ..., An, V, V]` representing vertex neighborhoods. The neighborhood of a vertex defines the support region for convolution. The value at `neighbors[A1, ..., An, i, j]` corresponds to the weight \\(w_{ij}\\) above. Each vertex must have at least one neighbor. sizes: An `int` tensor of shape `[A1, ..., An]` indicating the true input sizes in case of padding (`sizes=None` indicates no padding). Note that `sizes[A1, ..., An] <= V`. If `data` and `neighbors` are 2-D, `sizes` will be ignored. As an example, consider an input consisting of three graphs G0, G1, and G2 with V0, V1, and V2 vertices respectively. The padded input would have the shapes `[3, V, C]`, and `[3, V, V]` for `data` and `neighbors` respectively, where `V = max([V0, V1, V2])`. The true sizes of each graph will be specified by `sizes=[V0, V1, V2]` and `data[i, :Vi, :]` and `neighbors[i, :Vi, :Vi]` will be the vertex and neighborhood data of graph Gi. The `SparseTensor` `neighbors` should have no nonzero entries in the padded regions. edge_function: A callable that takes at least two arguments of vertex features and returns a tensor of vertex features. `Y = f(X1, X2, **kwargs)`, where `X1` and `X2` have shape `[V3, C]` and `Y` must have shape `[V3, D], D >= 1`. reduction: Either 'weighted' or 'max'. Specifies the reduction over the neighborhood. For 'weighted', the reduction is a weighted sum as shown in the equation above. For 'max' the reduction is a max over features in which case the weights $$w_{ij}$$ are ignored. edge_function_kwargs: A dict containing any additional keyword arguments to be passed to `edge_function`. name: A name for this op. Defaults to `graph_convolution_edge_convolution_template`. Returns: Tensor with shape `[A1, ..., An, V, D]`. Raises: TypeError: if the input types are invalid. ValueError: if the input dimensions are invalid. """ # pyformat: enable with tf.compat.v1.name_scope(name, "graph_convolution_edge_convolution_template", [data, neighbors, sizes]): data = tf.convert_to_tensor(value=data) neighbors = tf.compat.v1.convert_to_tensor_or_sparse_tensor(value=neighbors) if sizes is not None: sizes = tf.convert_to_tensor(value=sizes) data_ndims = data.shape.ndims utils.check_valid_graph_convolution_input(data, neighbors, sizes) # Flatten the batch dimensions and remove any vertex padding. 
if data_ndims > 2: if sizes is not None: sizes_square = tf.stack((sizes, sizes), axis=-1) else: sizes_square = None x_flat, unflatten = utils.flatten_batch_to_2d(data, sizes) adjacency = utils.convert_to_block_diag_2d(neighbors, sizes_square) else: x_flat = data adjacency = neighbors adjacency_ind_0 = adjacency.indices[:, 0] adjacency_ind_1 = adjacency.indices[:, 1] vertex_features = tf.gather(x_flat, adjacency_ind_0) neighbor_features = tf.gather(x_flat, adjacency_ind_1) edge_features = edge_function(vertex_features, neighbor_features, **edge_function_kwargs) if reduction == "weighted": edge_features_weighted = edge_features * tf.expand_dims( adjacency.values, -1) features = tf.math.unsorted_segment_sum( data=edge_features_weighted, segment_ids=adjacency_ind_0, num_segments=tf.shape(input=x_flat)[0]) elif reduction == "max": features = tf.math.segment_max(data=edge_features, segment_ids=adjacency_ind_0) else: raise ValueError("The reduction method must be 'weighted' or 'max'") features.set_shape(features.shape.merge_with( (tf.compat.v1.dimension_value(x_flat.shape[0]), tf.compat.v1.dimension_value(edge_features.shape[-1])))) if data_ndims > 2: features = unflatten(features) return features # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
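A shape-level sketch of calling `feature_steered_convolution` on a single unbatched graph follows. The 4-cycle adjacency and the random variable initializations are illustrative choices, not values taken from the module.

import tensorflow as tf
from tensorflow_graphics.geometry.convolution import graph_convolution

V, C, D, W = 4, 3, 8, 2  # vertices, input/output channels, weight matrices
data = tf.random.uniform((V, C))
# Row-normalized adjacency with self-edges on a 4-cycle: each vertex
# averages itself and its successor, so every row sums to 1.0.
indices = [(i, j) for i in range(V) for j in (i, (i + 1) % V)]
neighbors = tf.sparse.reorder(
    tf.sparse.SparseTensor(indices, [0.5] * (2 * V), (V, V)))
output = graph_convolution.feature_steered_convolution(
    data, neighbors, sizes=None,
    var_u=tf.random.uniform((C, W)), var_v=tf.random.uniform((C, W)),
    var_c=tf.zeros((W,)), var_w=tf.random.uniform((W, C, D)),
    var_b=tf.zeros((D,)))  # shape [V, D]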
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements various graph convolutions in TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.geometry.convolution import utils from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def feature_steered_convolution(data, neighbors, sizes, var_u, var_v, var_c, var_w, var_b, name=None): # pyformat: disable """Implements the Feature Steered graph convolution. FeaStNet: Feature-Steered Graph Convolutions for 3D Shape Analysis Nitika Verma, Edmond Boyer, Jakob Verbeek CVPR 2018 https://arxiv.org/abs/1706.05206 The shorthands used below are `V`: The number of vertices. `C`: The number of channels in the input data. `D`: The number of channels in the output after convolution. `W`: The number of weight matrices used in the convolution. The input variables (`var_u`, `var_v`, `var_c`, `var_w`, `var_b`) correspond to the variables with the same names in the paper cited above. Note: In the following, A1 to An are optional batch dimensions. Args: data: A `float` tensor with shape `[A1, ..., An, V, C]`. neighbors: A `SparseTensor` with the same type as `data` and with shape `[A1, ..., An, V, V]` representing vertex neighborhoods. The neighborhood of a vertex defines the support region for convolution. For a mesh, a common choice for the neighborhood of vertex i would be the vertices in the K-ring of i (including i itself). Each vertex must have at least one neighbor. For a faithful implementation of the FeaStNet convolution, neighbors should be a row-normalized weight matrix corresponding to the graph adjacency matrix with self-edges: `neighbors[A1, ..., An, i, j] > 0` if vertex j is a neighbor of i, and `neighbors[A1, ..., An, i, i] > 0` for all i, and `sum(neighbors, axis=-1)[A1, ..., An, i] == 1.0 for all i`. These requirements are relaxed in this implementation. sizes: An `int` tensor of shape `[A1, ..., An]` indicating the true input sizes in case of padding (`sizes=None` indicates no padding). Note that `sizes[A1, ..., An] <= V`. If `data` and `neighbors` are 2-D, `sizes` will be ignored. An example usage of `sizes`: consider an input consisting of three graphs G0, G1, and G2 with V0, V1, and V2 vertices respectively. The padded input would have the following shapes: `data.shape = [3, V, C]` and `neighbors.shape = [3, V, V]`, where `V = max([V0, V1, V2])`. The true sizes of each graph will be specified by `sizes=[V0, V1, V2]`; `data[i, :Vi, :]` and `neighbors[i, :Vi, :Vi]` will be the vertex and neighborhood data of graph Gi. The `SparseTensor` `neighbors` should have no nonzero entries in the padded regions. var_u: A 2-D tensor with shape `[C, W]`. var_v: A 2-D tensor with shape `[C, W]`. var_c: A 1-D tensor with shape `[W]`. var_w: A 3-D tensor with shape `[W, C, D]`. var_b: A 1-D tensor with shape `[D]`. name: A name for this op.
Defaults to `graph_convolution_feature_steered_convolution`. Returns: Tensor with shape `[A1, ..., An, V, D]`. Raises: TypeError: if the input types are invalid. ValueError: if the input dimensions are invalid. """ # pyformat: enable with tf.compat.v1.name_scope( name, "graph_convolution_feature_steered_convolution", [data, neighbors, sizes, var_u, var_v, var_c, var_w, var_b]): data = tf.convert_to_tensor(value=data) neighbors = tf.compat.v1.convert_to_tensor_or_sparse_tensor(value=neighbors) if sizes is not None: sizes = tf.convert_to_tensor(value=sizes) var_u = tf.convert_to_tensor(value=var_u) var_v = tf.convert_to_tensor(value=var_v) var_c = tf.convert_to_tensor(value=var_c) var_w = tf.convert_to_tensor(value=var_w) var_b = tf.convert_to_tensor(value=var_b) data_ndims = data.shape.ndims utils.check_valid_graph_convolution_input(data, neighbors, sizes) shape.compare_dimensions( tensors=(data, var_u, var_v, var_w), tensor_names=("data", "var_u", "var_v", "var_w"), axes=(-1, 0, 0, 1)) shape.compare_dimensions( tensors=(var_u, var_v, var_c, var_w), tensor_names=("var_u", "var_v", "var_c", "var_w"), axes=(1, 1, 0, 0)) shape.compare_dimensions( tensors=(var_w, var_b), tensor_names=("var_w", "var_b"), axes=-1) # Flatten the batch dimensions and remove any vertex padding. if data_ndims > 2: if sizes is not None: sizes_square = tf.stack((sizes, sizes), axis=-1) else: sizes_square = None x_flat, unflatten = utils.flatten_batch_to_2d(data, sizes) adjacency = utils.convert_to_block_diag_2d(neighbors, sizes_square) else: x_flat = data adjacency = neighbors x_u = tf.matmul(x_flat, var_u) x_v = tf.matmul(x_flat, var_v) adjacency_ind_0 = adjacency.indices[:, 0] adjacency_ind_1 = adjacency.indices[:, 1] x_u_rep = tf.gather(x_u, adjacency_ind_0) x_v_sep = tf.gather(x_v, adjacency_ind_1) weights_q = tf.exp(x_u_rep + x_v_sep + tf.reshape(var_c, (1, -1))) weights_q_sum = tf.reduce_sum( input_tensor=weights_q, axis=-1, keepdims=True) weights_q = weights_q / weights_q_sum y_i_m = [] x_sep = tf.gather(x_flat, adjacency_ind_1) q_m_list = tf.unstack(weights_q, axis=-1) w_m_list = tf.unstack(var_w, axis=0) x_flat_shape = tf.shape(input=x_flat) for q_m, w_m in zip(q_m_list, w_m_list): # Compute `y_i_m = sum_{j in neighborhood(i)} q_m(x_i, x_j) * w_m * x_j`. q_m = tf.expand_dims(q_m, axis=-1) p_sum = tf.math.unsorted_segment_sum( data=(q_m * x_sep) * tf.expand_dims(adjacency.values, -1), segment_ids=adjacency_ind_0, num_segments=x_flat_shape[0]) y_i_m.append(tf.matmul(p_sum, w_m)) y_out = tf.add_n(inputs=y_i_m) + tf.reshape(var_b, [1, -1]) if data_ndims > 2: y_out = unflatten(y_out) return y_out def edge_convolution_template(data, neighbors, sizes, edge_function, reduction, edge_function_kwargs, name=None): # pyformat: disable r"""A template for edge convolutions. This function implements a general edge convolution for graphs of the form \\(y_i = \sum_{j \in \mathcal{N}(i)} w_{ij} f(x_i, x_j)\\), where \\(\mathcal{N}(i)\\) is the set of vertices in the neighborhood of vertex \\(i\\), \\(x_i \in \mathbb{R}^C\\) are the features at vertex \\(i\\), \\(w_{ij} \in \mathbb{R}\\) is the weight for the edge between vertex \\(i\\) and vertex \\(j\\), and finally \\(f(x_i, x_j): \mathbb{R}^{C} \times \mathbb{R}^{C} \to \mathbb{R}^{D}\\) is a user-supplied function. This template also implements the same general edge convolution described above with a max-reduction instead of a weighted sum. 
An example of how this template can be used is for Laplacian smoothing, which is defined as $$y_i = \frac{1}{|\mathcal{N}(i)|} \sum_{j \in \mathcal{N}(i)} x_j$$. `edge_convolution_template` can be used to perform Laplacian smoothing by setting $$w_{ij} = \frac{1}{|\mathcal{N}(i)|}$$, `edge_function=lambda x, y: y`, and `reduction='weighted'`. The shorthands used below are `V`: The number of vertices. `C`: The number of channels in the input data. Note: In the following, A1 to An are optional batch dimensions. Args: data: A `float` tensor with shape `[A1, ..., An, V, C]`. neighbors: A `SparseTensor` with the same type as `data` and with shape `[A1, ..., An, V, V]` representing vertex neighborhoods. The neighborhood of a vertex defines the support region for convolution. The value at `neighbors[A1, ..., An, i, j]` corresponds to the weight \\(w_{ij}\\) above. Each vertex must have at least one neighbor. sizes: An `int` tensor of shape `[A1, ..., An]` indicating the true input sizes in case of padding (`sizes=None` indicates no padding). Note that `sizes[A1, ..., An] <= V`. If `data` and `neighbors` are 2-D, `sizes` will be ignored. As an example, consider an input consisting of three graphs G0, G1, and G2 with V0, V1, and V2 vertices respectively. The padded input would have the shapes `[3, V, C]`, and `[3, V, V]` for `data` and `neighbors` respectively, where `V = max([V0, V1, V2])`. The true sizes of each graph will be specified by `sizes=[V0, V1, V2]` and `data[i, :Vi, :]` and `neighbors[i, :Vi, :Vi]` will be the vertex and neighborhood data of graph Gi. The `SparseTensor` `neighbors` should have no nonzero entries in the padded regions. edge_function: A callable that takes at least two arguments of vertex features and returns a tensor of vertex features. `Y = f(X1, X2, **kwargs)`, where `X1` and `X2` have shape `[V3, C]` and `Y` must have shape `[V3, D], D >= 1`. reduction: Either 'weighted' or 'max'. Specifies the reduction over the neighborhood. For 'weighted', the reduction is a weighted sum as shown in the equation above. For 'max' the reduction is a max over features in which case the weights $$w_{ij}$$ are ignored. edge_function_kwargs: A dict containing any additional keyword arguments to be passed to `edge_function`. name: A name for this op. Defaults to `graph_convolution_edge_convolution_template`. Returns: Tensor with shape `[A1, ..., An, V, D]`. Raises: TypeError: if the input types are invalid. ValueError: if the input dimensions are invalid. """ # pyformat: enable with tf.compat.v1.name_scope(name, "graph_convolution_edge_convolution_template", [data, neighbors, sizes]): data = tf.convert_to_tensor(value=data) neighbors = tf.compat.v1.convert_to_tensor_or_sparse_tensor(value=neighbors) if sizes is not None: sizes = tf.convert_to_tensor(value=sizes) data_ndims = data.shape.ndims utils.check_valid_graph_convolution_input(data, neighbors, sizes) # Flatten the batch dimensions and remove any vertex padding. 
if data_ndims > 2: if sizes is not None: sizes_square = tf.stack((sizes, sizes), axis=-1) else: sizes_square = None x_flat, unflatten = utils.flatten_batch_to_2d(data, sizes) adjacency = utils.convert_to_block_diag_2d(neighbors, sizes_square) else: x_flat = data adjacency = neighbors adjacency_ind_0 = adjacency.indices[:, 0] adjacency_ind_1 = adjacency.indices[:, 1] vertex_features = tf.gather(x_flat, adjacency_ind_0) neighbor_features = tf.gather(x_flat, adjacency_ind_1) edge_features = edge_function(vertex_features, neighbor_features, **edge_function_kwargs) if reduction == "weighted": edge_features_weighted = edge_features * tf.expand_dims( adjacency.values, -1) features = tf.math.unsorted_segment_sum( data=edge_features_weighted, segment_ids=adjacency_ind_0, num_segments=tf.shape(input=x_flat)[0]) elif reduction == "max": features = tf.math.segment_max(data=edge_features, segment_ids=adjacency_ind_0) else: raise ValueError("The reduction method must be 'weighted' or 'max'") features.set_shape(features.shape.merge_with( (tf.compat.v1.dimension_value(x_flat.shape[0]), tf.compat.v1.dimension_value(edge_features.shape[-1])))) if data_ndims > 2: features = unflatten(features) return features # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
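As a reading aid for the file above, here is a minimal usage sketch of the Laplacian-smoothing configuration that the `edge_convolution_template` docstring describes. It is an illustration, not part of the file: the 3-vertex path graph and its weights are made up, and the module is assumed to be importable as `tensorflow_graphics.geometry.convolution.graph_convolution`.

```python
import tensorflow as tf
from tensorflow_graphics.geometry.convolution import graph_convolution

# A 3-vertex path graph 0 - 1 - 2 with 2-D features per vertex.
data = tf.constant([[0.0, 0.0], [1.0, 1.0], [4.0, 4.0]])
# Row-normalized neighborhood weights w_ij = 1 / |N(i)| (no self-edges here).
indices = [[0, 1], [1, 0], [1, 2], [2, 1]]
weights = [1.0, 0.5, 0.5, 1.0]
neighbors = tf.sparse.SparseTensor(indices, weights, dense_shape=[3, 3])

# f(x_i, x_j) = x_j with a weighted sum gives y_i = mean of i's neighbors.
smoothed = graph_convolution.edge_convolution_template(
    data=data,
    neighbors=neighbors,
    sizes=None,
    edge_function=lambda x, y: y,
    reduction='weighted',
    edge_function_kwargs={})
# smoothed == [[1, 1], [2, 2], [1, 1]]
```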
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/datasets/features/camera_feature_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for tensorflow_graphics.datasets.features.camera_feature.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf import tensorflow_datasets as tfds from tensorflow_graphics.datasets.features import camera_feature class CameraFeatureTest(tfds.testing.FeatureExpectationsTestCase): """Test Cases for Camera FeatureConnector.""" def __get_camera_params(self): pose = {'R': np.eye(3).astype(np.float32), 't': np.zeros(3).astype(np.float32)} f = 35. optical_center = (640 / 2, 480 / 2) return pose, f, optical_center def test_simple_camera(self): """Tests camera parameters with fixed focal length, no skew and no aspect ratio.""" expected_pose, expected_f, expected_center = self.__get_camera_params() expected_intrinsics = np.asarray([[expected_f, 0, expected_center[0]], [0, expected_f, expected_center[1]], [0, 0, 1]], dtype=np.float32) expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics} inputs = {'f': expected_f, 'optical_center': expected_center, 'pose': expected_pose} lookat_inputs = { 'f': expected_f, 'optical_center': expected_center, 'pose': { 'look_at': np.array([0, 0, -1], dtype=np.float32), 'up': np.array([0, 1, 0], dtype=np.float32), 'position': np.array([0, 0, 0], dtype=np.float32) } } raising_pose_entry = { 'f': expected_f, 'optical_center': expected_center, 'pose': np.eye(4) } raising_pose_inputs = { 'f': expected_f, 'optical_center': expected_center, 'pose': {'rot': np.eye(3), 'trans': np.zeros(3)} } raising_lookat_inputs = { 'f': expected_f, 'optical_center': expected_center, 'pose': { 'l': np.array([0, 0, -1], dtype=np.float32), 'up': np.array([0, 1, 0], dtype=np.float32), 'C': np.array([0, 0, 0], dtype=np.float32) } } self.assertFeature( feature=camera_feature.Camera(), shape={ 'pose': { 'R': (3, 3), 't': (3,) }, 'intrinsics': (3, 3) }, dtype={ 'pose': { 'R': tf.float32, 't': tf.float32 }, 'intrinsics': tf.float32 }, tests=[ tfds.testing.FeatureExpectationItem( value=inputs, expected=expected_camera, ), tfds.testing.FeatureExpectationItem( value=lookat_inputs, expected=expected_camera ), tfds.testing.FeatureExpectationItem( value=raising_pose_inputs, raise_cls=ValueError, raise_msg='Wrong keys for pose feature provided' ), tfds.testing.FeatureExpectationItem( value=raising_lookat_inputs, raise_cls=ValueError, raise_msg='Wrong keys for pose feature provided' ), tfds.testing.FeatureExpectationItem( value=raising_pose_entry, raise_cls=ValueError, raise_msg='Pose needs to be a dictionary' ), ], ) def test_camera_with_aspect_ratio_and_skew(self): """Tests camera parameters with fixed focal length, aspect_ratio and skew.""" expected_pose, expected_f, expected_center = self.__get_camera_params() expected_aspect_ratio = expected_center[0] / expected_center[1] expected_skew = 0.6 expected_intrinsics = np.asarray( [[expected_f, expected_skew, expected_center[0]], [0, 
expected_aspect_ratio * expected_f, expected_center[1]], [0, 0, 1]], dtype=np.float32) expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics} inputs = {'f': expected_f, 'optical_center': expected_center, 'skew': expected_skew, 'aspect_ratio': expected_aspect_ratio, 'pose': expected_pose} self.assertFeature( feature=camera_feature.Camera(), shape={ 'pose': { 'R': (3, 3), 't': (3,) }, 'intrinsics': (3, 3) }, dtype={ 'pose': { 'R': tf.float32, 't': tf.float32 }, 'intrinsics': tf.float32 }, tests=[ tfds.testing.FeatureExpectationItem( value=inputs, expected=expected_camera, ), ], ) def test_full_camera_calibration_matrix(self): """Tests camera parameters with different focal length per camera axis and skew.""" expected_pose, _, expected_optical_center = self.__get_camera_params() expected_skew = 0.6 expected_f = (35., 40.) expected_intrinsics = np.array( [[expected_f[0], expected_skew, expected_optical_center[0]], [0, expected_f[1], expected_optical_center[1]], [0, 0, 1]], dtype=np.float32) expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics} inputs = {'f': expected_f, 'optical_center': expected_optical_center, 'skew': expected_skew, 'pose': expected_pose} raising_inputs = {'f': expected_f, 'aspect_ratio': 1.5, 'optical_center': expected_optical_center, 'skew': expected_skew, 'pose': expected_pose} self.assertFeature( feature=camera_feature.Camera(), shape={ 'pose': { 'R': (3, 3), 't': (3,) }, 'intrinsics': (3, 3) }, dtype={ 'pose': { 'R': tf.float32, 't': tf.float32 }, 'intrinsics': tf.float32 }, tests=[ tfds.testing.FeatureExpectationItem( value=inputs, expected=expected_camera, ), tfds.testing.FeatureExpectationItem( value=raising_inputs, raise_cls=ValueError, raise_msg='If aspect ratio is provided, f needs to ' 'be a single float', ), ], ) if __name__ == '__main__': tfds.testing.test_main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for tensorflow_graphics.datasets.features.camera_feature.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf import tensorflow_datasets as tfds from tensorflow_graphics.datasets.features import camera_feature class CameraFeatureTest(tfds.testing.FeatureExpectationsTestCase): """Test Cases for Camera FeatureConnector.""" def __get_camera_params(self): pose = {'R': np.eye(3).astype(np.float32), 't': np.zeros(3).astype(np.float32)} f = 35. optical_center = (640 / 2, 480 / 2) return pose, f, optical_center def test_simple_camera(self): """Tests camera parameters with fixed focal length, no skew and no aspect ratio.""" expected_pose, expected_f, expected_center = self.__get_camera_params() expected_intrinsics = np.asarray([[expected_f, 0, expected_center[0]], [0, expected_f, expected_center[1]], [0, 0, 1]], dtype=np.float32) expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics} inputs = {'f': expected_f, 'optical_center': expected_center, 'pose': expected_pose} lookat_inputs = { 'f': expected_f, 'optical_center': expected_center, 'pose': { 'look_at': np.array([0, 0, -1], dtype=np.float32), 'up': np.array([0, 1, 0], dtype=np.float32), 'position': np.array([0, 0, 0], dtype=np.float32) } } raising_pose_entry = { 'f': expected_f, 'optical_center': expected_center, 'pose': np.eye(4) } raising_pose_inputs = { 'f': expected_f, 'optical_center': expected_center, 'pose': {'rot': np.eye(3), 'trans': np.zeros(3)} } raising_lookat_inputs = { 'f': expected_f, 'optical_center': expected_center, 'pose': { 'l': np.array([0, 0, -1], dtype=np.float32), 'up': np.array([0, 1, 0], dtype=np.float32), 'C': np.array([0, 0, 0], dtype=np.float32) } } self.assertFeature( feature=camera_feature.Camera(), shape={ 'pose': { 'R': (3, 3), 't': (3,) }, 'intrinsics': (3, 3) }, dtype={ 'pose': { 'R': tf.float32, 't': tf.float32 }, 'intrinsics': tf.float32 }, tests=[ tfds.testing.FeatureExpectationItem( value=inputs, expected=expected_camera, ), tfds.testing.FeatureExpectationItem( value=lookat_inputs, expected=expected_camera ), tfds.testing.FeatureExpectationItem( value=raising_pose_inputs, raise_cls=ValueError, raise_msg='Wrong keys for pose feature provided' ), tfds.testing.FeatureExpectationItem( value=raising_lookat_inputs, raise_cls=ValueError, raise_msg='Wrong keys for pose feature provided' ), tfds.testing.FeatureExpectationItem( value=raising_pose_entry, raise_cls=ValueError, raise_msg='Pose needs to be a dictionary' ), ], ) def test_camera_with_aspect_ratio_and_skew(self): """Tests camera parameters with fixed focal length, aspect_ratio and skew.""" expected_pose, expected_f, expected_center = self.__get_camera_params() expected_aspect_ratio = expected_center[0] / expected_center[1] expected_skew = 0.6 expected_intrinsics = np.asarray( [[expected_f, expected_skew, expected_center[0]], [0, 
expected_aspect_ratio * expected_f, expected_center[1]], [0, 0, 1]], dtype=np.float32) expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics} inputs = {'f': expected_f, 'optical_center': expected_center, 'skew': expected_skew, 'aspect_ratio': expected_aspect_ratio, 'pose': expected_pose} self.assertFeature( feature=camera_feature.Camera(), shape={ 'pose': { 'R': (3, 3), 't': (3,) }, 'intrinsics': (3, 3) }, dtype={ 'pose': { 'R': tf.float32, 't': tf.float32 }, 'intrinsics': tf.float32 }, tests=[ tfds.testing.FeatureExpectationItem( value=inputs, expected=expected_camera, ), ], ) def test_full_camera_calibration_matrix(self): """Tests camera parameters with different focal length per camera axis and skew.""" expected_pose, _, expected_optical_center = self.__get_camera_params() expected_skew = 0.6 expected_f = (35., 40.) expected_intrinsics = np.array( [[expected_f[0], expected_skew, expected_optical_center[0]], [0, expected_f[1], expected_optical_center[1]], [0, 0, 1]], dtype=np.float32) expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics} inputs = {'f': expected_f, 'optical_center': expected_optical_center, 'skew': expected_skew, 'pose': expected_pose} raising_inputs = {'f': expected_f, 'aspect_ratio': 1.5, 'optical_center': expected_optical_center, 'skew': expected_skew, 'pose': expected_pose} self.assertFeature( feature=camera_feature.Camera(), shape={ 'pose': { 'R': (3, 3), 't': (3,) }, 'intrinsics': (3, 3) }, dtype={ 'pose': { 'R': tf.float32, 't': tf.float32 }, 'intrinsics': tf.float32 }, tests=[ tfds.testing.FeatureExpectationItem( value=inputs, expected=expected_camera, ), tfds.testing.FeatureExpectationItem( value=raising_inputs, raise_cls=ValueError, raise_msg='If aspect ratio is provided, f needs to ' 'be a single float', ), ], ) if __name__ == '__main__': tfds.testing.test_main()
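For readers skimming these tests, the intrinsics the feature connector is expected to produce follow the standard pinhole calibration matrix. Below is a small numpy sketch of that construction; `calibration_matrix` is a hypothetical helper written for illustration and is not part of the connector's API.

```python
import numpy as np

def calibration_matrix(f, optical_center, skew=0.0, aspect_ratio=1.0):
  # K = [[f, skew, cx], [0, aspect_ratio * f, cy], [0, 0, 1]],
  # matching the `expected_intrinsics` arrays assembled in the tests above.
  cx, cy = optical_center
  return np.array([[f, skew, cx],
                   [0.0, aspect_ratio * f, cy],
                   [0.0, 0.0, 1.0]], dtype=np.float32)

# Same parameters as `test_camera_with_aspect_ratio_and_skew`.
K = calibration_matrix(35.0, (320.0, 240.0), skew=0.6,
                       aspect_ratio=320.0 / 240.0)
```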
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/rendering/voxels/emission_absorption.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements the emission absorption voxel rendering.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def render(voxels, absorption_factor=0.1, cell_size=1.0, axis=2, name=None): """Renders a voxel grid using the emission-absorption model, as described in ["Escaping Plato's Cave: 3D Shape From Adversarial Rendering" (Henzler 2019)](https://github.com/henzler/platonicgan). Note: In the following, A1 to An are optional batch dimensions. Args: voxels: A tensor of shape `[A1, ..., An, Vx, Vy, Vz, Vd]`, where Vx, Vy, Vz are the dimensions of the voxel grid and Vd the dimension of the information stored in each voxel (e.g. 3 for RGB color). absorption_factor: A scalar representing the density of the volume. cell_size: A scalar representing the size of a cell. axis: An index to the projection axis (0 for X, 1 for Y or 2 for Z). name: A name for this op. Defaults to "emission_absorption_render". Returns: A tensor of shape `[A1, ..., An, Vx, Vy, Vd]` representing images of size (Vx, Vy). Raises: ValueError: If the shapes of the input tensors are not supported. """ with tf.compat.v1.name_scope(name, "emission_absorption_render", [voxels]): voxels = tf.convert_to_tensor(value=voxels) shape.check_static( tensor=voxels, tensor_name="voxels", has_rank_greater_than=3) if axis not in [0, 1, 2]: raise ValueError("'axis' needs to be 0, 1 or 2") signal, density = tf.split(voxels, (-1, 1), axis=-1) density = tf.scalar_mul(absorption_factor / cell_size, density) one_minus_density = tf.ones_like(density) - density transmission = tf.math.cumprod(one_minus_density, axis=axis - 4) weight = density * transmission weight_sum = tf.reduce_sum(input_tensor=weight, axis=axis - 4) rendering = tf.reduce_sum(input_tensor=weight * signal, axis=axis - 4) rendering = rendering / (weight_sum + 1e-8) transparency = tf.reduce_prod(input_tensor=one_minus_density, axis=axis - 4) alpha = tf.ones_like(transparency) - transparency rendering = rendering * alpha image = tf.concat([rendering, alpha], axis=-1) return image # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module implements the emission absorption voxel rendering.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape def render(voxels, absorption_factor=0.1, cell_size=1.0, axis=2, name=None): """Renders a voxel grid using the emission-absorption model, as described in ["Escaping Plato's Cave: 3D Shape From Adversarial Rendering" (Henzler 2019)](https://github.com/henzler/platonicgan). Note: In the following, A1 to An are optional batch dimensions. Args: voxels: A tensor of shape `[A1, ..., An, Vx, Vy, Vz, Vd]`, where Vx, Vy, Vz are the dimensions of the voxel grid and Vd the dimension of the information stored in each voxel (e.g. 3 for RGB color). absorption_factor: A scalar representing the density of the volume. cell_size: A scalar representing the size of a cell. axis: An index to the projection axis (0 for X, 1 for Y or 2 for Z). name: A name for this op. Defaults to "emission_absorption_render". Returns: A tensor of shape `[A1, ..., An, Vx, Vy, Vd]` representing images of size (Vx, Vy). Raises: ValueError: If the shapes of the input tensors are not supported. """ with tf.compat.v1.name_scope(name, "emission_absorption_render", [voxels]): voxels = tf.convert_to_tensor(value=voxels) shape.check_static( tensor=voxels, tensor_name="voxels", has_rank_greater_than=3) if axis not in [0, 1, 2]: raise ValueError("'axis' needs to be 0, 1 or 2") signal, density = tf.split(voxels, (-1, 1), axis=-1) density = tf.scalar_mul(absorption_factor / cell_size, density) one_minus_density = tf.ones_like(density) - density transmission = tf.math.cumprod(one_minus_density, axis=axis - 4) weight = density * transmission weight_sum = tf.reduce_sum(input_tensor=weight, axis=axis - 4) rendering = tf.reduce_sum(input_tensor=weight * signal, axis=axis - 4) rendering = rendering / (weight_sum + 1e-8) transparency = tf.reduce_prod(input_tensor=one_minus_density, axis=axis - 4) alpha = tf.ones_like(transparency) - transparency rendering = rendering * alpha image = tf.concat([rendering, alpha], axis=-1) return image # API contains all public functions and classes. __all__ = export_api.get_functions_and_classes()
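A minimal usage sketch of `render` above (illustrative only; the random voxel grid is made up, and the module is assumed to be importable as `tensorflow_graphics.rendering.voxels.emission_absorption`):

```python
import tensorflow as tf
from tensorflow_graphics.rendering.voxels import emission_absorption

# A batch of two 16^3 voxel grids with 3 signal channels (RGB) + 1 density.
voxels = tf.random.uniform((2, 16, 16, 16, 4))
image = emission_absorption.render(voxels, absorption_factor=0.1, axis=2)
# Projecting along Z keeps the (Vx, Vy) plane; the last channel is alpha.
print(image.shape)  # (2, 16, 16, 4)
```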
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/nn/layer/tests/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/nn/metric/tests/recall_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the recall metric.""" from absl.testing import parameterized import numpy as np from tensorflow_graphics.nn.metric import recall from tensorflow_graphics.util import test_case def random_tensor(tensor_shape): return np.random.uniform(low=0.0, high=1.0, size=tensor_shape) def random_tensor_shape(): tensor_size = np.random.randint(5) + 1 return np.random.randint(1, 10, size=(tensor_size)).tolist() class RecallTest(test_case.TestCase): @parameterized.parameters( # recall = 0.25. ((0, 1, 1, 1, 1), (1, 1, 0, 0, 0), 0.25), # recall = 1. ((0, 0, 0, 1, 1, 1, 0, 1), (0, 0, 0, 1, 1, 1, 0, 1), 1), # All-0 predictions, returns 0. ((0, 1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0), # All-0 ground truth, returns 0. ((0, 0, 0, 0, 0, 0), (0, 0, 0, 1, 0, 1), 0), ) def test_evaluate_preset(self, ground_truth, predictions, expected_recall): tensor_shape = random_tensor_shape() ground_truth_labels = np.tile(ground_truth, tensor_shape + [1]) predicted_labels = np.tile(predictions, tensor_shape + [1]) expected = np.tile(expected_recall, tensor_shape) result = recall.evaluate(ground_truth_labels, predicted_labels, classes=[1]) self.assertAllClose(expected, result) @parameterized.parameters( # Recall for classes 2, 3: [1/3, 1.] ((2, 0, 3, 1, 2, 2), (2, 3, 3, 2, 3, 3), [2, 3], False, [1. / 3, 1]), # Average recall for classes 2, 3: 2/3 ((2, 0, 3, 1, 2, 2), (2, 3, 3, 2, 3, 3), [2, 3], True, 2. / 3), # Recall for all classes: [0, 0, 0.5, 0.5] ((1, 2, 3, 3, 1, 1, 2), (0, 2, 0, 3, 0, 2, 0), None, False, [0, 0, 0.5, 0.5]), # Average recall for all classes: 0.25 ((1, 2, 3, 3, 1, 1, 2), (0, 2, 0, 3, 0, 2, 0), None, True, 0.25), ) def test_evaluate_preset_multiclass(self, ground_truth, predictions, classes, reduce_average, expected_recall): tensor_shape = random_tensor_shape() ground_truth_labels = np.tile(ground_truth, tensor_shape + [1]) predicted_labels = np.tile(predictions, tensor_shape + [1]) expected = np.tile(expected_recall, tensor_shape + ([1] if not reduce_average else [])) result = recall.evaluate(ground_truth_labels, predicted_labels, classes, reduce_average) self.assertAllClose(expected, result) @parameterized.parameters( ("Not all batch dimensions are broadcast-compatible.", (1, 5, 3), (4, 3)), ("Not all batch dimensions are broadcast-compatible.", (3, 4), (2, 4, 5)), ) def test_evaluate_shape_exception_raised(self, error_msg, *shape): """Tests that the shape exception is raised.""" self.assert_exception_is_raised(recall.evaluate, error_msg, shape) @parameterized.parameters( ((1, 5, 3), (2, 5, 1)), ((None, 2, 6), (4, 2, None)), ((3, 1, 1, 2), (3, 5, 8, 2)), ) def test_evaluate_shape_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(recall.evaluate, shapes) if __name__ == "__main__": test_case.main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the recall metric.""" from absl.testing import parameterized import numpy as np from tensorflow_graphics.nn.metric import recall from tensorflow_graphics.util import test_case def random_tensor(tensor_shape): return np.random.uniform(low=0.0, high=1.0, size=tensor_shape) def random_tensor_shape(): tensor_size = np.random.randint(5) + 1 return np.random.randint(1, 10, size=(tensor_size)).tolist() class RecallTest(test_case.TestCase): @parameterized.parameters( # recall = 0.25. ((0, 1, 1, 1, 1), (1, 1, 0, 0, 0), 0.25), # recall = 1. ((0, 0, 0, 1, 1, 1, 0, 1), (0, 0, 0, 1, 1, 1, 0, 1), 1), # All-0 predictions, returns 0. ((0, 1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0), # All-0 ground truth, returns 0. ((0, 0, 0, 0, 0, 0), (0, 0, 0, 1, 0, 1), 0), ) def test_evaluate_preset(self, ground_truth, predictions, expected_recall): tensor_shape = random_tensor_shape() ground_truth_labels = np.tile(ground_truth, tensor_shape + [1]) predicted_labels = np.tile(predictions, tensor_shape + [1]) expected = np.tile(expected_recall, tensor_shape) result = recall.evaluate(ground_truth_labels, predicted_labels, classes=[1]) self.assertAllClose(expected, result) @parameterized.parameters( # Recall for classes 2, 3: [1/3, 1.] ((2, 0, 3, 1, 2, 2), (2, 3, 3, 2, 3, 3), [2, 3], False, [1. / 3, 1]), # Average recall for classes 2, 3: 2/3 ((2, 0, 3, 1, 2, 2), (2, 3, 3, 2, 3, 3), [2, 3], True, 2. / 3), # Recall for all classes: [0, 0, 0.5, 0.5] ((1, 2, 3, 3, 1, 1, 2), (0, 2, 0, 3, 0, 2, 0), None, False, [0, 0, 0.5, 0.5]), # Average recall for all classes: 0.25 ((1, 2, 3, 3, 1, 1, 2), (0, 2, 0, 3, 0, 2, 0), None, True, 0.25), ) def test_evaluate_preset_multiclass(self, ground_truth, predictions, classes, reduce_average, expected_recall): tensor_shape = random_tensor_shape() ground_truth_labels = np.tile(ground_truth, tensor_shape + [1]) predicted_labels = np.tile(predictions, tensor_shape + [1]) expected = np.tile(expected_recall, tensor_shape + ([1] if not reduce_average else [])) result = recall.evaluate(ground_truth_labels, predicted_labels, classes, reduce_average) self.assertAllClose(expected, result) @parameterized.parameters( ("Not all batch dimensions are broadcast-compatible.", (1, 5, 3), (4, 3)), ("Not all batch dimensions are broadcast-compatible.", (3, 4), (2, 4, 5)), ) def test_evaluate_shape_exception_raised(self, error_msg, *shape): """Tests that the shape exception is raised.""" self.assert_exception_is_raised(recall.evaluate, error_msg, shape) @parameterized.parameters( ((1, 5, 3), (2, 5, 1)), ((None, 2, 6), (4, 2, None)), ((3, 1, 1, 2), (3, 5, 8, 2)), ) def test_evaluate_shape_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(recall.evaluate, shapes) if __name__ == "__main__": test_case.main()
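For reference while reading the presets above, per-class recall reduces to TP / (TP + FN). The numpy sketch below is an illustration of that definition, not the library implementation; it reproduces the first preset:

```python
import numpy as np

def recall_for_class(ground_truth, predictions, c):
  # Recall = true positives / actual positives, defined as 0 when the
  # class never occurs in the ground truth.
  ground_truth = np.asarray(ground_truth)
  predictions = np.asarray(predictions)
  true_positives = np.sum((ground_truth == c) & (predictions == c))
  actual_positives = np.sum(ground_truth == c)
  return true_positives / actual_positives if actual_positives else 0.0

print(recall_for_class((0, 1, 1, 1, 1), (1, 1, 0, 0, 0), 1))  # 0.25
```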
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/geometry/representation/mesh/tests/utils_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow_graphics.geometry.representation.mesh import utils from tensorflow_graphics.util import test_case class UtilsTest(test_case.TestCase): @parameterized.parameters( (np.array(((0, 1, 2),)), [[0, 1], [0, 2], [1, 2]]), (np.array( ((0, 1, 2), (0, 1, 3))), [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3]]), ) def test_extract_undirected_edges_from_triangular_mesh_preset( self, test_inputs, test_outputs): """Tests that the output contains the expected edges.""" edges = utils.extract_unique_edges_from_triangular_mesh( test_inputs, directed_edges=False) edges.sort(axis=1) # Ensure edge tuple ordered by first vertex. self.assertEqual(sorted(edges.tolist()), test_outputs) @parameterized.parameters( (np.array( ((0, 1, 2),)), [[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]), (np.array( ((0, 1, 2), (0, 1, 3))), [[0, 1], [0, 2], [0, 3], [1, 0], [1, 2], [1, 3], [2, 0], [2, 1], [3, 0], [3, 1]]), ) def test_extract_directed_edges_from_triangular_mesh_preset( self, test_inputs, test_outputs): """Tests that the output contains the expected edges.""" edges = utils.extract_unique_edges_from_triangular_mesh( test_inputs, directed_edges=True) self.assertEqual(sorted(edges.tolist()), test_outputs) @parameterized.parameters( (1, "'faces' must be a numpy.ndarray."), (np.array((1,)), "must have a rank equal to 2"), (np.array((((1,),),)), "must have a rank equal to 2"), (np.array(((1,),)), "must have exactly 3 dimensions in the last axis"), (np.array(((1, 1),)), "must have exactly 3 dimensions in the last axis"), (np.array( ((1, 1, 1, 1),)), "must have exactly 3 dimensions in the last axis"), ) def test_extract_edges_from_triangular_mesh_raised( self, invalid_input, error_msg): """Tests that the shape exceptions are properly raised.""" with self.assertRaisesRegexp(ValueError, error_msg): utils.extract_unique_edges_from_triangular_mesh(invalid_input) @parameterized.parameters( (np.array(((0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1))), np.float16, [0.5, 0.5, 0.5, 0.5, 0.5, 0.5]), (np.array(((0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1))), np.float32, [0.5, 0.5, 0.5, 0.5, 0.5, 0.5]), (np.array(((0, 1), (0, 2), (0, 3), (1, 0), (1, 2), (1, 3), (2, 0), (2, 1), (3, 0), (3, 1))), np.float64, [1.0 / 3, 1.0 / 3, 1.0 / 3, 1.0 / 3, 1.0 / 3, 1.0 / 3, 0.5, 0.5, 0.5, 0.5]), ) def test_get_degree_based_edge_weights_preset( self, test_inputs, test_dtype, test_outputs): """Tests that the output contains the expected weights.""" weights = utils.get_degree_based_edge_weights(test_inputs, test_dtype) self.assertAllClose(weights.tolist(), test_outputs) @parameterized.parameters( (1, "'edges' must be a numpy.ndarray."), (np.array((1,)), "must have a rank equal to 2"), (np.array((((1,),),)), "must have a rank equal to 2"), (np.array(((1,),)), "must have exactly 2 dimensions in the last axis"), (np.array( ((1, 1, 1),)), "must have exactly 2 dimensions in the last axis"), ) def test_get_degree_based_edge_weights_invalid_edges_raised( self, invalid_input, error_msg): """Tests that the shape exceptions are properly raised.""" with self.assertRaisesRegexp(ValueError, error_msg): utils.get_degree_based_edge_weights(invalid_input) @parameterized.parameters( (np.bool, "must be a numpy float type"), (np.int, "must be a numpy float type"), (np.complex, "must be a numpy float type"), (np.uint, "must be a numpy float type"), (np.int16, "must be a numpy float type"), ) def test_get_degree_based_edge_weights_dtype_raised( self, invalid_type, error_msg): """Tests that the shape exceptions are properly raised.""" with self.assertRaisesRegexp(ValueError, error_msg): utils.get_degree_based_edge_weights(np.array(((1, 1),)), invalid_type) if __name__ == "__main__": test_case.main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow_graphics.geometry.representation.mesh import utils from tensorflow_graphics.util import test_case class UtilsTest(test_case.TestCase): @parameterized.parameters( (np.array(((0, 1, 2),)), [[0, 1], [0, 2], [1, 2]]), (np.array( ((0, 1, 2), (0, 1, 3))), [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3]]), ) def test_extract_undirected_edges_from_triangular_mesh_preset( self, test_inputs, test_outputs): """Tests that the output contains the expected edges.""" edges = utils.extract_unique_edges_from_triangular_mesh( test_inputs, directed_edges=False) edges.sort(axis=1) # Ensure edge tuple ordered by first vertex. self.assertEqual(sorted(edges.tolist()), test_outputs) @parameterized.parameters( (np.array( ((0, 1, 2),)), [[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]), (np.array( ((0, 1, 2), (0, 1, 3))), [[0, 1], [0, 2], [0, 3], [1, 0], [1, 2], [1, 3], [2, 0], [2, 1], [3, 0], [3, 1]]), ) def test_extract_directed_edges_from_triangular_mesh_preset( self, test_inputs, test_outputs): """Tests that the output contains the expected edges.""" edges = utils.extract_unique_edges_from_triangular_mesh( test_inputs, directed_edges=True) self.assertEqual(sorted(edges.tolist()), test_outputs) @parameterized.parameters( (1, "'faces' must be a numpy.ndarray."), (np.array((1,)), "must have a rank equal to 2"), (np.array((((1,),),)), "must have a rank equal to 2"), (np.array(((1,),)), "must have exactly 3 dimensions in the last axis"), (np.array(((1, 1),)), "must have exactly 3 dimensions in the last axis"), (np.array( ((1, 1, 1, 1),)), "must have exactly 3 dimensions in the last axis"), ) def test_extract_edges_from_triangular_mesh_raised( self, invalid_input, error_msg): """Tests that the shape exceptions are properly raised.""" with self.assertRaisesRegexp(ValueError, error_msg): utils.extract_unique_edges_from_triangular_mesh(invalid_input) @parameterized.parameters( (np.array(((0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1))), np.float16, [0.5, 0.5, 0.5, 0.5, 0.5, 0.5]), (np.array(((0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1))), np.float32, [0.5, 0.5, 0.5, 0.5, 0.5, 0.5]), (np.array(((0, 1), (0, 2), (0, 3), (1, 0), (1, 2), (1, 3), (2, 0), (2, 1), (3, 0), (3, 1))), np.float64, [1.0 / 3, 1.0 / 3, 1.0 / 3, 1.0 / 3, 1.0 / 3, 1.0 / 3, 0.5, 0.5, 0.5, 0.5]), ) def test_get_degree_based_edge_weights_preset( self, test_inputs, test_dtype, test_outputs): """Tests that the output contains the expected weights.""" weights = utils.get_degree_based_edge_weights(test_inputs, test_dtype) self.assertAllClose(weights.tolist(), test_outputs) @parameterized.parameters( (1, "'edges' must be a numpy.ndarray."), (np.array((1,)), "must have a rank equal to 2"), (np.array((((1,),),)), "must have a rank equal to 2"), (np.array(((1,),)), "must have exactly 2 dimensions in the last axis"), (np.array( ((1, 1, 1),)), "must have exactly 2 dimensions in the last axis"), ) def test_get_degree_based_edge_weights_invalid_edges_raised( self, invalid_input, error_msg): """Tests that the shape exceptions are properly raised.""" with self.assertRaisesRegexp(ValueError, error_msg): utils.get_degree_based_edge_weights(invalid_input) @parameterized.parameters( (np.bool, "must be a numpy float type"), (np.int, "must be a numpy float type"), (np.complex, "must be a numpy float type"), (np.uint, "must be a numpy float type"), (np.int16, "must be a numpy float type"), ) def test_get_degree_based_edge_weights_dtype_raised( self, invalid_type, error_msg): """Tests that the shape exceptions are properly raised.""" with self.assertRaisesRegexp(ValueError, error_msg): utils.get_degree_based_edge_weights(np.array(((1, 1),)), invalid_type) if __name__ == "__main__": test_case.main()
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/projects/neural_voxel_renderer/prepare_tfrecords/README.md
# Dataset generation for Neural Voxel Renderer ___ The [training](https://colab.research.google.com/github/tensorflow/graphics/blob/master/tensorflow_graphics/projects/neural_voxel_renderer/train.ipynb) and [inference](https://colab.research.google.com/github/tensorflow/graphics/blob/master/tensorflow_graphics/projects/neural_voxel_renderer/demo.ipynb) examples use demo data to illustrate the functionality of Neural Voxel Renderer (NVR). In this document, we describe how to generate the full dataset to train NVR from scratch. **Warning:** the generated TFRecords will take ~350GB of disk space. ___ ## Download the colored voxels This dataset contains the colored voxels of 2040 chairs. The size of the dataset is **~16GB**. Each shape is represented as a 128<sup>3</sup> x 4 voxel grid, where each voxel contains an RGB and occupancy value. The color was obtained from a single image aligned with the voxels. ``` bash PATH_TO_COLOR_VOXELS=/tmp/colored_voxels/ mkdir $PATH_TO_COLOR_VOXELS bash download_colored_voxels.sh $PATH_TO_COLOR_VOXELS ``` ## Download the synthetic images The dataset contains the target images (rendered using Blender) and all the necessary information that was used to set up the scene in 3D (object rotation, translation, camera parameters, etc.). The size of the dataset is **~400MB**. ``` bash PATH_TO_SYNTHETIC_DATASET=/tmp/synthetic_dataset/ mkdir $PATH_TO_SYNTHETIC_DATASET wget -P $PATH_TO_SYNTHETIC_DATASET https://storage.googleapis.com/tensorflow-graphics/notebooks/neural_voxel_renderer/blender_dataset/default_chairs_test.tfrecord wget -P $PATH_TO_SYNTHETIC_DATASET https://storage.googleapis.com/tensorflow-graphics/notebooks/neural_voxel_renderer/blender_dataset/default_chairs_train.tfrecord ``` ## Run the script The script iterates over all the synthetic images and pairs them with the corresponding colored voxels, placed according to the scene set-up. Additionally, it estimates the rendered image directly from the voxels, which is used as an additional input in NVR plus. ``` python PATH_TO_TFRECORDS=/tmp/tfrecords/ mkdir $PATH_TO_TFRECORDS python generate_tfrecords_nvr_plus.py -- --mode test --voxels_dir $PATH_TO_COLOR_VOXELS --images_dir $PATH_TO_SYNTHETIC_DATASET --output_dir $PATH_TO_TFRECORDS python generate_tfrecords_nvr_plus.py -- --mode train --voxels_dir $PATH_TO_COLOR_VOXELS --images_dir $PATH_TO_SYNTHETIC_DATASET --output_dir $PATH_TO_TFRECORDS ```
# Dataset generation for Neural Voxel Renderer ___ The [training](https://colab.research.google.com/github/tensorflow/graphics/blob/master/tensorflow_graphics/projects/neural_voxel_renderer/train.ipynb) and [inference](https://colab.research.google.com/github/tensorflow/graphics/blob/master/tensorflow_graphics/projects/neural_voxel_renderer/demo.ipynb) examples use demo data to illustrate the functionality of Neural Voxel Renderer (NVR). In this document, we describe how to generate the full dataset to train NVR from scratch. **Warning:** the generated TFRecords will take ~350GB of disk space. ___ ## Download the colored voxels This dataset contains the colored voxels of 2040 chairs. The size of the dataset is **~16GB**. Each shape is represented as a 128<sup>3</sup> x 4 voxel grid, where each voxel contains an RGB and occupancy value. The color was obtained from a single image aligned with the voxels. ``` bash PATH_TO_COLOR_VOXELS=/tmp/colored_voxels/ mkdir $PATH_TO_COLOR_VOXELS bash download_colored_voxels.sh $PATH_TO_COLOR_VOXELS ``` ## Download the synthetic images The dataset contains the target images (rendered using Blender) and all the necessary information that was used to set up the scene in 3D (object rotation, translation, camera parameters, etc.). The size of the dataset is **~400MB**. ``` bash PATH_TO_SYNTHETIC_DATASET=/tmp/synthetic_dataset/ mkdir $PATH_TO_SYNTHETIC_DATASET wget -P $PATH_TO_SYNTHETIC_DATASET https://storage.googleapis.com/tensorflow-graphics/notebooks/neural_voxel_renderer/blender_dataset/default_chairs_test.tfrecord wget -P $PATH_TO_SYNTHETIC_DATASET https://storage.googleapis.com/tensorflow-graphics/notebooks/neural_voxel_renderer/blender_dataset/default_chairs_train.tfrecord ``` ## Run the script The script iterates over all the synthetic images and pairs them with the corresponding colored voxels, placed according to the scene set-up. Additionally, it estimates the rendered image directly from the voxels, which is used as an additional input in NVR plus. ``` python PATH_TO_TFRECORDS=/tmp/tfrecords/ mkdir $PATH_TO_TFRECORDS python generate_tfrecords_nvr_plus.py -- --mode test --voxels_dir $PATH_TO_COLOR_VOXELS --images_dir $PATH_TO_SYNTHETIC_DATASET --output_dir $PATH_TO_TFRECORDS python generate_tfrecords_nvr_plus.py -- --mode train --voxels_dir $PATH_TO_COLOR_VOXELS --images_dir $PATH_TO_SYNTHETIC_DATASET --output_dir $PATH_TO_TFRECORDS ```
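As an optional sanity check after generation (a sketch, not part of the released scripts; the shard name is a placeholder to be replaced with a file produced by the commands above):

```python
import tensorflow as tf

# Hypothetical shard name for illustration only.
dataset = tf.data.TFRecordDataset('/tmp/tfrecords/<generated-shard>.tfrecord')
num_records = sum(1 for _ in dataset)
print(num_records)
```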
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/math/optimizer/tests/__init__.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./.git/refs/heads/master
7cf23d1e4bb39b169839443db5bcf17d508a55c8
7cf23d1e4bb39b169839443db5bcf17d508a55c8
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/datasets/modelnet40/modelnet40_makefakes.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Generates fake data for testing.""" import os from absl import app from absl import flags import h5py import numpy as np flags.DEFINE_string("fakes_path", ".", "path where files will be generated") FLAGS = flags.FLAGS def main(argv): """Generates files with the internal structure. Args: argv: command-line arguments (unused); the output path is given by the `--fakes_path` flag. Reference: f = h5py.File("modelnet40_ply_hdf5_2048/ply_data_train0.h5", "r") print(f['data']) # <HDF5 dataset "data": shape(2048, 2048, 3), type "<f4"> print(f['label']) # <HDF5 dataset "label": shape(2048, 1), type "|u1"> """ if len(argv) != 1: raise app.UsageError("One argument required.") for i in range(3): fake_points = np.random.randn(8, 2048, 3).astype(np.float32) fake_label = np.random.uniform(low=0, high=40, size=(8, 1)).astype(np.uint8) path = os.path.join(FLAGS.fakes_path, "ply_data_train{}.h5".format(i)) with h5py.File(path, "w") as h5f: h5f.create_dataset("data", data=fake_points) h5f.create_dataset("label", data=fake_label) for i in range(2): fake_points = np.random.randn(8, 2048, 3).astype(np.float32) fake_label = np.random.uniform(low=0, high=40, size=(8, 1)).astype(np.uint8) path = os.path.join(FLAGS.fakes_path, "ply_data_test{}.h5".format(i)) with h5py.File(path, "w") as h5f: h5f.create_dataset("data", data=fake_points) h5f.create_dataset("label", data=fake_label) if __name__ == "__main__": app.run(main)
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Generates fake data for testing.""" import os from absl import app from absl import flags import h5py import numpy as np flags.DEFINE_string("fakes_path", ".", "path where files will be generated") FLAGS = flags.FLAGS def main(argv): """Generates files with the internal structure. Args: argv: command-line arguments (unused); the output path is given by the `--fakes_path` flag. Reference: f = h5py.File("modelnet40_ply_hdf5_2048/ply_data_train0.h5", "r") print(f['data']) # <HDF5 dataset "data": shape(2048, 2048, 3), type "<f4"> print(f['label']) # <HDF5 dataset "label": shape(2048, 1), type "|u1"> """ if len(argv) != 1: raise app.UsageError("One argument required.") for i in range(3): fake_points = np.random.randn(8, 2048, 3).astype(np.float32) fake_label = np.random.uniform(low=0, high=40, size=(8, 1)).astype(np.uint8) path = os.path.join(FLAGS.fakes_path, "ply_data_train{}.h5".format(i)) with h5py.File(path, "w") as h5f: h5f.create_dataset("data", data=fake_points) h5f.create_dataset("label", data=fake_label) for i in range(2): fake_points = np.random.randn(8, 2048, 3).astype(np.float32) fake_label = np.random.uniform(low=0, high=40, size=(8, 1)).astype(np.uint8) path = os.path.join(FLAGS.fakes_path, "ply_data_test{}.h5".format(i)) with h5py.File(path, "w") as h5f: h5f.create_dataset("data", data=fake_points) h5f.create_dataset("label", data=fake_label) if __name__ == "__main__": app.run(main)
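A read-back sketch mirroring the docstring's reference snippet (an illustration; it assumes the generator above was run with the default `--fakes_path` in the current directory):

```python
import h5py

with h5py.File('ply_data_train0.h5', 'r') as f:
  points = f['data'][:]   # (8, 2048, 3) float32 point clouds.
  labels = f['label'][:]  # (8, 1) uint8 class ids in [0, 40).
print(points.shape, labels.shape)
```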
-1
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/rendering/voxels/tests/test_helpers.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test helpers for the voxels module.""" import numpy as np def generate_random_test_voxels_render(): """Generates random test for the voxels rendering functions.""" batch_shape = np.random.randint(1, 3) voxels_shape = np.random.randint(2, 8, size=(3)).tolist() signals_dimension = np.random.randint(2, 4) random_voxels = np.random.uniform(size=[batch_shape] + voxels_shape + [signals_dimension]) return random_voxels def generate_preset_test_voxels_visual_hull_render(): """Generates preset test for the visual hull voxels rendering function.""" voxels = np.array([[[[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0.3, 0.7, 0.1], [1, 0.1, 0], [0.2, 0.1, 0.8]], [[0.1, 0.9, 0], [0.2, 1, 0.4], [0.3, 0.2, 0]]], [[[0.15, 0.69, 0.57], [0.07, 0.33, 0.55], [0, 0, 0]], [[0.71, 0.61, 0.43], [1, 0.1, 0], [0.71, 0.61, 0.43]], [[0.17, 0.01, 1.22], [0.2, 1, 0.4], [0.67, 0.94, 0.14]]], [[[0, 0, 0], [0.17, 0.33, 0.55], [0, 0, 0]], [[0.71, 0.61, 0.43], [1, 0.1, 0], [0.71, 0.61, 0.43]], [[0.1, 0.9, 0], [0.2, 1, 0.4], [0.88, 0.09, 0.45]]], [[[1, 0, 0], [0, 0, 0], [1, 0, 0]], [[0.88, 0.09, 0.5], [0.71, 0.61, 0.4], [0.14, 0, 0.22]], [[0.71, 0.61, 0.45], [0.71, 0.7, 0.43], [0.3, 0.2, 0]]]], [[[[0, 0, 0], [0, 0, 0], [0, 0, 1]], [[0.3, 0.7, 0.1], [0.15, 0.69, 0.5], [0.88, 0.09, 0.45]], [[0.07, 0.33, 0.55], [0.2, 1, 0.4], [0.4, 0.34, 0.43]]], [[[0, 1, 0], [0, 1, 0], [0, 1, 0]], [[0.19, 0.06, 0.24], [1, 0.1, 0], [0.2, 0.1, 0.8]], [[0.67, 0.94, 0.14], [0.2, 1, 0.4], [0.15, 0.69, 0.57]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0.74, 0.67, 0.4], [0.64, 0.8, 0.19], [0.9, 0.6, 0.48]], [[0.1, 0.9, 0], [0.02, 0.37, 0.56], [0.62, 0.98, 0.19]]], [[[0.04, 0.87, 0.37], [0, 0, 0], [1, 0, 0]], [[0.3, 0.7, 0.1], [0.24, 0.12, 0.7], [0.76, 0.64, 0.79]], [[0.7, 0.2, 0.2], [0.4, 1, 0.9], [0.19, 0.66, 0.03]]]]]) images = np.array([[[[0, 0, 0], [0.77686984, 0.59343034, 0.59343034], [0.45118836, 0.87754357, 0.32967995]], [[0.19748120, 0.63940506, 0.67372021], [0.91107838, 0.73286470, 0.57683792], [0.64654532, 0.85772593, 0.82795514]], [[0.15633518, 0.28107627, 0.42305019], [0.91107838, 0.73286470, 0.57683792], [0.69272126, 0.86330457, 0.57258507]], [[0.86466472, 0, 0], [0.82271559, 0.50341470, 0.67372021], [0.82093385, 0.77909002, 0.58521709]]], [[[0, 0, 0.63212055], [0.73552274, 0.77236231, 0.65006225], [0.48829142, 0.81175293, 0.74842145]], [[0, 0.950212931, 0], [0.75092470, 0.22894841, 0.64654532], [0.63940506, 0.92792154, 0.67044104]], [[0, 0, 0], [0.89771579, 0.87381422, 0.65699148], [0.52288608, 0.89460078, 0.52763345]], [[0.64654532, 0.58104845, 0.30926567], [0.72746821, 0.76776373, 0.79607439], [0.724729, 0.844327, 0.676967]]]]) # pyformat: disable return voxels, images def generate_preset_test_voxels_absorption_render(): """Generates preset test for the absorption voxels rendering function.""" voxels = np.array([[[[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0.3, 0.7, 0.1], [1, 0.1, 0], [0.2, 0.1, 0.8]], [[0.1, 0.9, 0], [0.2, 1, 0.4], [0.3, 0.2, 0]]], [[[0.15, 0.69, 0.57], [0.07, 
0.33, 0.55], [0, 0, 0]], [[0.71, 0.61, 0.43], [1, 0.1, 0], [0.71, 0.61, 0.43]], [[0.17, 0.01, 1.22], [0.2, 1, 0.4], [0.67, 0.94, 0.14]]], [[[0, 0, 0], [0.17, 0.33, 0.55], [0, 0, 0]], [[0.71, 0.61, 0.43], [1, 0.1, 0], [0.71, 0.61, 0.43]], [[0.1, 0.9, 0], [0.2, 1, 0.4], [0.88, 0.09, 0.45]]], [[[1, 0, 0], [0, 0, 0], [1, 0, 0]], [[0.88, 0.09, 0.5], [0.71, 0.61, 0.4], [0.14, 0, 0.22]], [[0.71, 0.61, 0.45], [0.71, 0.7, 0.43], [0.3, 0.2, 0]]]], [[[[0, 0, 0], [0, 0, 0], [0, 0, 1]], [[0.3, 0.7, 0.1], [0.15, 0.69, 0.5], [0.88, 0.09, 0.45]], [[0.07, 0.33, 0.55], [0.2, 1, 0.4], [0.4, 0.34, 0.43]]], [[[0, 1, 0], [0, 1, 0], [0, 1, 0]], [[0.19, 0.06, 0.24], [1, 0.1, 0], [0.2, 0.1, 0.8]], [[0.67, 0.94, 0.14], [0.2, 1, 0.4], [0.15, 0.69, 0.57]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0.74, 0.67, 0.4], [0.64, 0.8, 0.19], [0.9, 0.6, 0.48]], [[0.1, 0.9, 0], [0.02, 0.37, 0.56], [0.62, 0.98, 0.19]]], [[[0.04, 0.87, 0.37], [0, 0, 0], [1, 0, 0]], [[0.3, 0.7, 0.1], [0.24, 0.12, 0.7], [0.76, 0.64, 0.79]], [[0.7, 0.2, 0.2], [0.4, 1, 0.9], [0.19, 0.66, 0.03]]]]]) images = np.array([[[[0, 0, 0], [0.6175, 0.413375, 0.43], [0.27325, 0.7525, 0.2]], [[0.107375, 0.453075, 0.481625], [0.7919875, 0.54112625, 0.383775], [0.4523725, 0.736325, 0.70984]], [[0.085, 0.165, 0.275], [0.7919875, 0.54112625, 0.383775], [0.5212, 0.737375, 0.38]], [[0.75, 0, 0], [0.664084, 0.336275, 0.466], [0.64637875, 0.593425, 0.391625]]], [[[0, 0, 0.5], [0.5597, 0.59340875, 0.4478125], [0.3052, 0.653475, 0.5447]], [[0, 0.875, 0], [0.59275, 0.124575, 0.472], [0.4463875, 0.826425, 0.46804]], [[0, 0, 0], [0.76438, 0.7207, 0.44976], [0.351055, 0.7713925, 0.3484]], [[0.51, 0.435, 0.185], [0.53624, 0.58452, 0.6264125], [0.5294, 0.6985, 0.512425]]]]) # pyformat: disable return voxels, images def generate_preset_test_voxels_emission_absorption_render(): """Generates preset test for the emission absorption voxels rendering function.""" voxels = np.array([[[[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0.3, 0.7, 0.1], [1, 0.1, 0], [0.2, 0.1, 0.8]], [[0.1, 0.9, 0], [0.2, 1, 0.4], [0.3, 0.2, 0]]], [[[0.15, 0.69, 0.57], [0.07, 0.33, 0.55], [0, 0, 0]], [[0.71, 0.61, 0.43], [1, 0.1, 0], [0.71, 0.61, 0.43]], [[0.17, 0.01, 1.22], [0.2, 1, 0.4], [0.67, 0.94, 0.14]]], [[[0, 0, 0], [0.17, 0.33, 0.55], [0, 0, 0]], [[0.71, 0.61, 0.43], [1, 0.1, 0], [0.71, 0.61, 0.43]], [[0.1, 0.9, 0], [0.2, 1, 0.4], [0.88, 0.09, 0.45]]], [[[1, 0, 0], [0, 0, 0], [1, 0, 0]], [[0.88, 0.09, 0.5], [0.71, 0.61, 0.4], [0.14, 0, 0.22]], [[0.71, 0.61, 0.45], [0.71, 0.7, 0.43], [0.3, 0.2, 0]]]], [[[[0, 0, 0], [0, 0, 0], [0, 0, 1]], [[0.3, 0.7, 0.1], [0.15, 0.69, 0.5], [0.88, 0.09, 0.45]], [[0.07, 0.33, 0.55], [0.2, 1, 0.4], [0.4, 0.34, 0.43]]], [[[0, 1, 0], [0, 1, 0], [0, 1, 0]], [[0.19, 0.06, 0.24], [1, 0.1, 0], [0.2, 0.1, 0.8]], [[0.67, 0.94, 0.14], [0.2, 1, 0.4], [0.15, 0.69, 0.57]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0.74, 0.67, 0.4], [0.64, 0.8, 0.19], [0.9, 0.6, 0.48]], [[0.1, 0.9, 0], [0.02, 0.37, 0.56], [0.62, 0.98, 0.19]]], [[[0.04, 0.87, 0.37], [0, 0, 0], [1, 0, 0]], [[0.3, 0.7, 0.1], [0.24, 0.12, 0.7], [0.76, 0.64, 0.79]], [[0.7, 0.2, 0.2], [0.4, 1, 0.9], [0.19, 0.66, 0.03]]]]]) images = np.array([[[[0, 0, 0], [0.19553845, 0.27123076, 0.82], [0.08, 0.39999998, 0.4]], [[0.10144142, 0.46858389, 0.8065], [0.47932099, 0.41181099, 0.6751], [0.22078022, 0.23262935, 1.11352]], [[0.0935, 0.18149999, 0.55], [0.47932099, 0.41181099, 0.6751], [0.30814825, 0.43694864, 0.67]], [[0, 0, 0], [0.5677705, 0.17392569, 0.766], [0.48741499, 0.44055107, 0.6865]]], [[[0, 0, 1], [0.28019208, 0.40287539, 0.7525], 
[0.13121746, 0.42573205, 0.8461]], [[0, 0, 0], [0.16451199, 0.064448, 0.848], [0.24191167, 0.69841443, 0.77812]], [[0, 0, 0], [0.56974806, 0.50646416, 0.74728], [0.09611898, 0.32276643, 0.6436]], [[0.0148, 0.32189999, 0.37], [0.3099809, 0.33312645, 0.9433], [0.55598098, 0.41542985, 0.9224]]]]) # pyformat: disable return voxels, images
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test helpers for the voxels module.""" import numpy as np def generate_random_test_voxels_render(): """Generates random test for the voxels rendering functions.""" batch_shape = np.random.randint(1, 3) voxels_shape = np.random.randint(2, 8, size=(3)).tolist() signals_dimension = np.random.randint(2, 4) random_voxels = np.random.uniform(size=[batch_shape] + voxels_shape + [signals_dimension]) return random_voxels def generate_preset_test_voxels_visual_hull_render(): """Generates preset test for the visual hull voxels rendering function.""" voxels = np.array([[[[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0.3, 0.7, 0.1], [1, 0.1, 0], [0.2, 0.1, 0.8]], [[0.1, 0.9, 0], [0.2, 1, 0.4], [0.3, 0.2, 0]]], [[[0.15, 0.69, 0.57], [0.07, 0.33, 0.55], [0, 0, 0]], [[0.71, 0.61, 0.43], [1, 0.1, 0], [0.71, 0.61, 0.43]], [[0.17, 0.01, 1.22], [0.2, 1, 0.4], [0.67, 0.94, 0.14]]], [[[0, 0, 0], [0.17, 0.33, 0.55], [0, 0, 0]], [[0.71, 0.61, 0.43], [1, 0.1, 0], [0.71, 0.61, 0.43]], [[0.1, 0.9, 0], [0.2, 1, 0.4], [0.88, 0.09, 0.45]]], [[[1, 0, 0], [0, 0, 0], [1, 0, 0]], [[0.88, 0.09, 0.5], [0.71, 0.61, 0.4], [0.14, 0, 0.22]], [[0.71, 0.61, 0.45], [0.71, 0.7, 0.43], [0.3, 0.2, 0]]]], [[[[0, 0, 0], [0, 0, 0], [0, 0, 1]], [[0.3, 0.7, 0.1], [0.15, 0.69, 0.5], [0.88, 0.09, 0.45]], [[0.07, 0.33, 0.55], [0.2, 1, 0.4], [0.4, 0.34, 0.43]]], [[[0, 1, 0], [0, 1, 0], [0, 1, 0]], [[0.19, 0.06, 0.24], [1, 0.1, 0], [0.2, 0.1, 0.8]], [[0.67, 0.94, 0.14], [0.2, 1, 0.4], [0.15, 0.69, 0.57]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0.74, 0.67, 0.4], [0.64, 0.8, 0.19], [0.9, 0.6, 0.48]], [[0.1, 0.9, 0], [0.02, 0.37, 0.56], [0.62, 0.98, 0.19]]], [[[0.04, 0.87, 0.37], [0, 0, 0], [1, 0, 0]], [[0.3, 0.7, 0.1], [0.24, 0.12, 0.7], [0.76, 0.64, 0.79]], [[0.7, 0.2, 0.2], [0.4, 1, 0.9], [0.19, 0.66, 0.03]]]]]) images = np.array([[[[0, 0, 0], [0.77686984, 0.59343034, 0.59343034], [0.45118836, 0.87754357, 0.32967995]], [[0.19748120, 0.63940506, 0.67372021], [0.91107838, 0.73286470, 0.57683792], [0.64654532, 0.85772593, 0.82795514]], [[0.15633518, 0.28107627, 0.42305019], [0.91107838, 0.73286470, 0.57683792], [0.69272126, 0.86330457, 0.57258507]], [[0.86466472, 0, 0], [0.82271559, 0.50341470, 0.67372021], [0.82093385, 0.77909002, 0.58521709]]], [[[0, 0, 0.63212055], [0.73552274, 0.77236231, 0.65006225], [0.48829142, 0.81175293, 0.74842145]], [[0, 0.950212931, 0], [0.75092470, 0.22894841, 0.64654532], [0.63940506, 0.92792154, 0.67044104]], [[0, 0, 0], [0.89771579, 0.87381422, 0.65699148], [0.52288608, 0.89460078, 0.52763345]], [[0.64654532, 0.58104845, 0.30926567], [0.72746821, 0.76776373, 0.79607439], [0.724729, 0.844327, 0.676967]]]]) # pyformat: disable return voxels, images def generate_preset_test_voxels_absorption_render(): """Generates preset test for the absorption voxels rendering function.""" voxels = np.array([[[[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0.3, 0.7, 0.1], [1, 0.1, 0], [0.2, 0.1, 0.8]], [[0.1, 0.9, 0], [0.2, 1, 0.4], [0.3, 0.2, 0]]], [[[0.15, 0.69, 0.57], [0.07, 
0.33, 0.55], [0, 0, 0]], [[0.71, 0.61, 0.43], [1, 0.1, 0], [0.71, 0.61, 0.43]], [[0.17, 0.01, 1.22], [0.2, 1, 0.4], [0.67, 0.94, 0.14]]], [[[0, 0, 0], [0.17, 0.33, 0.55], [0, 0, 0]], [[0.71, 0.61, 0.43], [1, 0.1, 0], [0.71, 0.61, 0.43]], [[0.1, 0.9, 0], [0.2, 1, 0.4], [0.88, 0.09, 0.45]]], [[[1, 0, 0], [0, 0, 0], [1, 0, 0]], [[0.88, 0.09, 0.5], [0.71, 0.61, 0.4], [0.14, 0, 0.22]], [[0.71, 0.61, 0.45], [0.71, 0.7, 0.43], [0.3, 0.2, 0]]]], [[[[0, 0, 0], [0, 0, 0], [0, 0, 1]], [[0.3, 0.7, 0.1], [0.15, 0.69, 0.5], [0.88, 0.09, 0.45]], [[0.07, 0.33, 0.55], [0.2, 1, 0.4], [0.4, 0.34, 0.43]]], [[[0, 1, 0], [0, 1, 0], [0, 1, 0]], [[0.19, 0.06, 0.24], [1, 0.1, 0], [0.2, 0.1, 0.8]], [[0.67, 0.94, 0.14], [0.2, 1, 0.4], [0.15, 0.69, 0.57]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0.74, 0.67, 0.4], [0.64, 0.8, 0.19], [0.9, 0.6, 0.48]], [[0.1, 0.9, 0], [0.02, 0.37, 0.56], [0.62, 0.98, 0.19]]], [[[0.04, 0.87, 0.37], [0, 0, 0], [1, 0, 0]], [[0.3, 0.7, 0.1], [0.24, 0.12, 0.7], [0.76, 0.64, 0.79]], [[0.7, 0.2, 0.2], [0.4, 1, 0.9], [0.19, 0.66, 0.03]]]]]) images = np.array([[[[0, 0, 0], [0.6175, 0.413375, 0.43], [0.27325, 0.7525, 0.2]], [[0.107375, 0.453075, 0.481625], [0.7919875, 0.54112625, 0.383775], [0.4523725, 0.736325, 0.70984]], [[0.085, 0.165, 0.275], [0.7919875, 0.54112625, 0.383775], [0.5212, 0.737375, 0.38]], [[0.75, 0, 0], [0.664084, 0.336275, 0.466], [0.64637875, 0.593425, 0.391625]]], [[[0, 0, 0.5], [0.5597, 0.59340875, 0.4478125], [0.3052, 0.653475, 0.5447]], [[0, 0.875, 0], [0.59275, 0.124575, 0.472], [0.4463875, 0.826425, 0.46804]], [[0, 0, 0], [0.76438, 0.7207, 0.44976], [0.351055, 0.7713925, 0.3484]], [[0.51, 0.435, 0.185], [0.53624, 0.58452, 0.6264125], [0.5294, 0.6985, 0.512425]]]]) # pyformat: disable return voxels, images def generate_preset_test_voxels_emission_absorption_render(): """Generates preset test for the emission absorption voxels rendering function.""" voxels = np.array([[[[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0.3, 0.7, 0.1], [1, 0.1, 0], [0.2, 0.1, 0.8]], [[0.1, 0.9, 0], [0.2, 1, 0.4], [0.3, 0.2, 0]]], [[[0.15, 0.69, 0.57], [0.07, 0.33, 0.55], [0, 0, 0]], [[0.71, 0.61, 0.43], [1, 0.1, 0], [0.71, 0.61, 0.43]], [[0.17, 0.01, 1.22], [0.2, 1, 0.4], [0.67, 0.94, 0.14]]], [[[0, 0, 0], [0.17, 0.33, 0.55], [0, 0, 0]], [[0.71, 0.61, 0.43], [1, 0.1, 0], [0.71, 0.61, 0.43]], [[0.1, 0.9, 0], [0.2, 1, 0.4], [0.88, 0.09, 0.45]]], [[[1, 0, 0], [0, 0, 0], [1, 0, 0]], [[0.88, 0.09, 0.5], [0.71, 0.61, 0.4], [0.14, 0, 0.22]], [[0.71, 0.61, 0.45], [0.71, 0.7, 0.43], [0.3, 0.2, 0]]]], [[[[0, 0, 0], [0, 0, 0], [0, 0, 1]], [[0.3, 0.7, 0.1], [0.15, 0.69, 0.5], [0.88, 0.09, 0.45]], [[0.07, 0.33, 0.55], [0.2, 1, 0.4], [0.4, 0.34, 0.43]]], [[[0, 1, 0], [0, 1, 0], [0, 1, 0]], [[0.19, 0.06, 0.24], [1, 0.1, 0], [0.2, 0.1, 0.8]], [[0.67, 0.94, 0.14], [0.2, 1, 0.4], [0.15, 0.69, 0.57]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0.74, 0.67, 0.4], [0.64, 0.8, 0.19], [0.9, 0.6, 0.48]], [[0.1, 0.9, 0], [0.02, 0.37, 0.56], [0.62, 0.98, 0.19]]], [[[0.04, 0.87, 0.37], [0, 0, 0], [1, 0, 0]], [[0.3, 0.7, 0.1], [0.24, 0.12, 0.7], [0.76, 0.64, 0.79]], [[0.7, 0.2, 0.2], [0.4, 1, 0.9], [0.19, 0.66, 0.03]]]]]) images = np.array([[[[0, 0, 0], [0.19553845, 0.27123076, 0.82], [0.08, 0.39999998, 0.4]], [[0.10144142, 0.46858389, 0.8065], [0.47932099, 0.41181099, 0.6751], [0.22078022, 0.23262935, 1.11352]], [[0.0935, 0.18149999, 0.55], [0.47932099, 0.41181099, 0.6751], [0.30814825, 0.43694864, 0.67]], [[0, 0, 0], [0.5677705, 0.17392569, 0.766], [0.48741499, 0.44055107, 0.6865]]], [[[0, 0, 1], [0.28019208, 0.40287539, 0.7525], 
[0.13121746, 0.42573205, 0.8461]], [[0, 0, 0], [0.16451199, 0.064448, 0.848], [0.24191167, 0.69841443, 0.77812]], [[0, 0, 0], [0.56974806, 0.50646416, 0.74728], [0.09611898, 0.32276643, 0.6436]], [[0.0148, 0.32189999, 0.37], [0.3099809, 0.33312645, 0.9433], [0.55598098, 0.41542985, 0.9224]]]]) # pyformat: disable return voxels, images
-1
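
The preset tensors in the row above pin the voxel rendering models down numerically. As a cross-check, here is a minimal NumPy sketch — not the TensorFlow Graphics implementation itself — that reproduces the visual hull and absorption presets. The function names and the `absorption_factor=0.5` default are illustrative assumptions inferred from the preset values; the only grounded facts are the numbers themselves, which both functions match when integration runs along the second-to-last voxel axis.

```python
import numpy as np


def visual_hull_reference(voxels):
  # Integrate density along a ray (axis -2) and convert to opacity:
  # image = 1 - exp(-sum(d)). The preset column [1, 0, 1] in channel 0
  # sums to 2 and yields 1 - exp(-2) ~= 0.86466472, as in the data above.
  return 1.0 - np.exp(-np.sum(voxels, axis=-2))


def absorption_reference(voxels, absorption_factor=0.5):
  # Each cell absorbs a fraction of the remaining light:
  # image = 1 - prod(1 - f * d). For the preset column [0.3, 1, 0.2] this
  # gives 1 - 0.85 * 0.5 * 0.9 = 0.6175, as in the data above.
  return 1.0 - np.prod(1.0 - absorption_factor * voxels, axis=-2)
```

Applied to the `[2, 4, 3, 3, 3]` preset voxel grids, both functions return `[2, 4, 3, 3]` images that agree with the hard-coded expectations, which is what makes these presets usable as regression targets.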
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/geometry/transformation/tests/rotation_matrix_2d_test.py
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for 2d rotation matrix.""" from absl.testing import flagsaver from absl.testing import parameterized import numpy as np from tensorflow_graphics.geometry.transformation import rotation_matrix_2d from tensorflow_graphics.geometry.transformation.tests import test_data as td from tensorflow_graphics.geometry.transformation.tests import test_helpers from tensorflow_graphics.util import test_case class RotationMatrix2dTest(test_case.TestCase): @parameterized.parameters( ((1,)), ((None, 1),), ) def test_from_euler_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(rotation_matrix_2d.from_euler, shapes) @parameterized.parameters( ("must have exactly 1 dimensions in axis -1", (None,)),) def test_from_euler_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(rotation_matrix_2d.from_euler, error_msg, shapes) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_from_euler_jacobian_preset(self): """Test the Jacobian of the from_euler function.""" x_init = test_helpers.generate_preset_test_euler_angles(dimensions=1) self.assert_jacobian_is_correct_fn(rotation_matrix_2d.from_euler, [x_init]) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_from_euler_jacobian_random(self): """Test the Jacobian of the from_euler function.""" x_init = test_helpers.generate_random_test_euler_angles(dimensions=1) self.assert_jacobian_is_correct_fn(rotation_matrix_2d.from_euler, [x_init]) def test_from_euler_normalized_preset(self): """Tests that an angle maps to correct matrix.""" euler_angles = test_helpers.generate_preset_test_euler_angles(dimensions=1) matrix = rotation_matrix_2d.from_euler(euler_angles) self.assertAllEqual( rotation_matrix_2d.is_valid(matrix), np.ones(euler_angles.shape[0:-1] + (1,), dtype=bool)) @parameterized.parameters( ((td.ANGLE_0,), (td.MAT_2D_ID,)), ((td.ANGLE_45,), (td.MAT_2D_45,)), ((td.ANGLE_90,), (td.MAT_2D_90,)), ((td.ANGLE_180,), (td.MAT_2D_180,)), ) def test_from_euler_preset(self, test_inputs, test_outputs): """Tests that an angle maps to correct matrix.""" self.assert_output_is_correct(rotation_matrix_2d.from_euler, test_inputs, test_outputs) @parameterized.parameters( ((1,),), ((None, 1),), ) def test_from_euler_with_small_angles_approximation_exception_not_raised( self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised( rotation_matrix_2d.from_euler_with_small_angles_approximation, shapes) @parameterized.parameters( ("must have exactly 1 dimensions in axis -1", (None,)),) def test_from_euler_with_small_angles_approximation_exception_raised( self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised( rotation_matrix_2d.from_euler_with_small_angles_approximation, error_msg, shape) def 
test_from_euler_with_small_angles_approximation_random(self): """Tests small_angles approximation by comparing to exact calculation.""" # Only generate small angles. For a test tolerance of 1e-3, 0.17 was found # empirically to be the range where the small angle approximation works. random_euler_angles = test_helpers.generate_random_test_euler_angles( min_angle=-0.17, max_angle=0.17, dimensions=1) exact_matrix = rotation_matrix_2d.from_euler(random_euler_angles) approximate_matrix = ( rotation_matrix_2d.from_euler_with_small_angles_approximation( random_euler_angles)) self.assertAllClose(exact_matrix, approximate_matrix, atol=1e-3) @parameterized.parameters( ((2, 2),), ((None, 2, 2),), ) def test_inverse_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(rotation_matrix_2d.inverse, shapes) @parameterized.parameters( ("must have a rank greater than 1", (2,)), ("must have exactly 2 dimensions in axis -1", (2, None)), ("must have exactly 2 dimensions in axis -2", (None, 2)), ) def test_inverse_exception_raised(self, error_msg, *shapes): """Checks the inputs of the inverse function.""" self.assert_exception_is_raised(rotation_matrix_2d.inverse, error_msg, shapes) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_inverse_jacobian_preset(self): """Test the Jacobian of the inverse function.""" x_init = test_helpers.generate_preset_test_rotation_matrices_2d() self.assert_jacobian_is_correct_fn(rotation_matrix_2d.inverse, [x_init]) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_inverse_jacobian_random(self): """Test the Jacobian of the inverse function.""" x_init = test_helpers.generate_random_test_rotation_matrix_2d() self.assert_jacobian_is_correct_fn(rotation_matrix_2d.inverse, [x_init]) def test_inverse_random(self): """Checks that inverting rotated points results in no transformation.""" random_euler_angles = test_helpers.generate_random_test_euler_angles( dimensions=1) tensor_shape = random_euler_angles.shape[:-1] random_matrix = rotation_matrix_2d.from_euler(random_euler_angles) random_point = np.random.normal(size=tensor_shape + (2,)) rotated_random_points = rotation_matrix_2d.rotate(random_point, random_matrix) predicted_invert_random_matrix = rotation_matrix_2d.inverse(random_matrix) predicted_invert_rotated_random_points = rotation_matrix_2d.rotate( rotated_random_points, predicted_invert_random_matrix) self.assertAllClose( random_point, predicted_invert_rotated_random_points, rtol=1e-6) @parameterized.parameters( ((2, 2),), ((None, 2, 2),), ) def test_is_valid_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(rotation_matrix_2d.inverse, shapes) @parameterized.parameters( ("must have a rank greater than 1", (2,)), ("must have exactly 2 dimensions in axis -1", (2, None)), ("must have exactly 2 dimensions in axis -2", (None, 2)), ) def test_is_valid_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised(rotation_matrix_2d.is_valid, error_msg, shape) @parameterized.parameters( ((2,), (2, 2)), ((None, 2), (None, 2, 2)), ((1, 2), (1, 2, 2)), ((2, 2), (2, 2, 2)), ((2,), (1, 2, 2)), ((1, 2), (2, 2)), ) def test_rotate_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(rotation_matrix_2d.rotate, shapes) @parameterized.parameters( ("must have exactly 2 dimensions in axis -1", 
(None,), (2, 2)), ("must have a rank greater than 1", (2,), (2,)), ("must have exactly 2 dimensions in axis -1", (2,), (2, None)), ("must have exactly 2 dimensions in axis -2", (2,), (None, 2)), ) def test_rotate_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(rotation_matrix_2d.rotate, error_msg, shape) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_rotate_jacobian_preset(self): """Test the Jacobian of the rotate function.""" x_matrix_init = test_helpers.generate_preset_test_rotation_matrices_2d() tensor_shape = x_matrix_init.shape[:-2] + (2,) x_point_init = np.random.uniform(size=tensor_shape) self.assert_jacobian_is_correct_fn(rotation_matrix_2d.rotate, [x_point_init, x_matrix_init]) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_rotate_jacobian_random(self): """Test the Jacobian of the rotate function.""" x_matrix_init = test_helpers.generate_random_test_rotation_matrix_2d() tensor_shape = x_matrix_init.shape[:-2] + (2,) x_point_init = np.random.uniform(size=tensor_shape) self.assert_jacobian_is_correct_fn(rotation_matrix_2d.rotate, [x_point_init, x_matrix_init]) @parameterized.parameters( ((td.AXIS_2D_0, td.ANGLE_90), (td.AXIS_2D_0,)), ((td.AXIS_2D_X, td.ANGLE_90), (td.AXIS_2D_Y,)), ) def test_rotate_preset(self, test_inputs, test_outputs): """Tests that the rotate function correctly rotates points.""" def func(test_point, test_angle): random_matrix = rotation_matrix_2d.from_euler(test_angle) return rotation_matrix_2d.rotate(test_point, random_matrix) self.assert_output_is_correct(func, test_inputs, test_outputs) if __name__ == "__main__": test_case.main()
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for 2d rotation matrix.""" from absl.testing import flagsaver from absl.testing import parameterized import numpy as np from tensorflow_graphics.geometry.transformation import rotation_matrix_2d from tensorflow_graphics.geometry.transformation.tests import test_data as td from tensorflow_graphics.geometry.transformation.tests import test_helpers from tensorflow_graphics.util import test_case class RotationMatrix2dTest(test_case.TestCase): @parameterized.parameters( ((1,)), ((None, 1),), ) def test_from_euler_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(rotation_matrix_2d.from_euler, shapes) @parameterized.parameters( ("must have exactly 1 dimensions in axis -1", (None,)),) def test_from_euler_exception_raised(self, error_msg, *shapes): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(rotation_matrix_2d.from_euler, error_msg, shapes) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_from_euler_jacobian_preset(self): """Test the Jacobian of the from_euler function.""" x_init = test_helpers.generate_preset_test_euler_angles(dimensions=1) self.assert_jacobian_is_correct_fn(rotation_matrix_2d.from_euler, [x_init]) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_from_euler_jacobian_random(self): """Test the Jacobian of the from_euler function.""" x_init = test_helpers.generate_random_test_euler_angles(dimensions=1) self.assert_jacobian_is_correct_fn(rotation_matrix_2d.from_euler, [x_init]) def test_from_euler_normalized_preset(self): """Tests that an angle maps to correct matrix.""" euler_angles = test_helpers.generate_preset_test_euler_angles(dimensions=1) matrix = rotation_matrix_2d.from_euler(euler_angles) self.assertAllEqual( rotation_matrix_2d.is_valid(matrix), np.ones(euler_angles.shape[0:-1] + (1,), dtype=bool)) @parameterized.parameters( ((td.ANGLE_0,), (td.MAT_2D_ID,)), ((td.ANGLE_45,), (td.MAT_2D_45,)), ((td.ANGLE_90,), (td.MAT_2D_90,)), ((td.ANGLE_180,), (td.MAT_2D_180,)), ) def test_from_euler_preset(self, test_inputs, test_outputs): """Tests that an angle maps to correct matrix.""" self.assert_output_is_correct(rotation_matrix_2d.from_euler, test_inputs, test_outputs) @parameterized.parameters( ((1,),), ((None, 1),), ) def test_from_euler_with_small_angles_approximation_exception_not_raised( self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised( rotation_matrix_2d.from_euler_with_small_angles_approximation, shapes) @parameterized.parameters( ("must have exactly 1 dimensions in axis -1", (None,)),) def test_from_euler_with_small_angles_approximation_exception_raised( self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised( rotation_matrix_2d.from_euler_with_small_angles_approximation, error_msg, shape) def 
test_from_euler_with_small_angles_approximation_random(self): """Tests small_angles approximation by comparing to exact calculation.""" # Only generate small angles. For a test tolerance of 1e-3, 0.17 was found # empirically to be the range where the small angle approximation works. random_euler_angles = test_helpers.generate_random_test_euler_angles( min_angle=-0.17, max_angle=0.17, dimensions=1) exact_matrix = rotation_matrix_2d.from_euler(random_euler_angles) approximate_matrix = ( rotation_matrix_2d.from_euler_with_small_angles_approximation( random_euler_angles)) self.assertAllClose(exact_matrix, approximate_matrix, atol=1e-3) @parameterized.parameters( ((2, 2),), ((None, 2, 2),), ) def test_inverse_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(rotation_matrix_2d.inverse, shapes) @parameterized.parameters( ("must have a rank greater than 1", (2,)), ("must have exactly 2 dimensions in axis -1", (2, None)), ("must have exactly 2 dimensions in axis -2", (None, 2)), ) def test_inverse_exception_raised(self, error_msg, *shapes): """Checks the inputs of the inverse function.""" self.assert_exception_is_raised(rotation_matrix_2d.inverse, error_msg, shapes) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_inverse_jacobian_preset(self): """Test the Jacobian of the inverse function.""" x_init = test_helpers.generate_preset_test_rotation_matrices_2d() self.assert_jacobian_is_correct_fn(rotation_matrix_2d.inverse, [x_init]) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_inverse_jacobian_random(self): """Test the Jacobian of the inverse function.""" x_init = test_helpers.generate_random_test_rotation_matrix_2d() self.assert_jacobian_is_correct_fn(rotation_matrix_2d.inverse, [x_init]) def test_inverse_random(self): """Checks that inverting rotated points results in no transformation.""" random_euler_angles = test_helpers.generate_random_test_euler_angles( dimensions=1) tensor_shape = random_euler_angles.shape[:-1] random_matrix = rotation_matrix_2d.from_euler(random_euler_angles) random_point = np.random.normal(size=tensor_shape + (2,)) rotated_random_points = rotation_matrix_2d.rotate(random_point, random_matrix) predicted_invert_random_matrix = rotation_matrix_2d.inverse(random_matrix) predicted_invert_rotated_random_points = rotation_matrix_2d.rotate( rotated_random_points, predicted_invert_random_matrix) self.assertAllClose( random_point, predicted_invert_rotated_random_points, rtol=1e-6) @parameterized.parameters( ((2, 2),), ((None, 2, 2),), ) def test_is_valid_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(rotation_matrix_2d.inverse, shapes) @parameterized.parameters( ("must have a rank greater than 1", (2,)), ("must have exactly 2 dimensions in axis -1", (2, None)), ("must have exactly 2 dimensions in axis -2", (None, 2)), ) def test_is_valid_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are raised.""" self.assert_exception_is_raised(rotation_matrix_2d.is_valid, error_msg, shape) @parameterized.parameters( ((2,), (2, 2)), ((None, 2), (None, 2, 2)), ((1, 2), (1, 2, 2)), ((2, 2), (2, 2, 2)), ((2,), (1, 2, 2)), ((1, 2), (2, 2)), ) def test_rotate_exception_not_raised(self, *shapes): """Tests that the shape exceptions are not raised.""" self.assert_exception_is_not_raised(rotation_matrix_2d.rotate, shapes) @parameterized.parameters( ("must have exactly 2 dimensions in axis -1", 
(None,), (2, 2)), ("must have a rank greater than 1", (2,), (2,)), ("must have exactly 2 dimensions in axis -1", (2,), (2, None)), ("must have exactly 2 dimensions in axis -2", (2,), (None, 2)), ) def test_rotate_exception_raised(self, error_msg, *shape): """Tests that the shape exceptions are properly raised.""" self.assert_exception_is_raised(rotation_matrix_2d.rotate, error_msg, shape) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_rotate_jacobian_preset(self): """Test the Jacobian of the rotate function.""" x_matrix_init = test_helpers.generate_preset_test_rotation_matrices_2d() tensor_shape = x_matrix_init.shape[:-2] + (2,) x_point_init = np.random.uniform(size=tensor_shape) self.assert_jacobian_is_correct_fn(rotation_matrix_2d.rotate, [x_point_init, x_matrix_init]) @flagsaver.flagsaver(tfg_add_asserts_to_graph=False) def test_rotate_jacobian_random(self): """Test the Jacobian of the rotate function.""" x_matrix_init = test_helpers.generate_random_test_rotation_matrix_2d() tensor_shape = x_matrix_init.shape[:-2] + (2,) x_point_init = np.random.uniform(size=tensor_shape) self.assert_jacobian_is_correct_fn(rotation_matrix_2d.rotate, [x_point_init, x_matrix_init]) @parameterized.parameters( ((td.AXIS_2D_0, td.ANGLE_90), (td.AXIS_2D_0,)), ((td.AXIS_2D_X, td.ANGLE_90), (td.AXIS_2D_Y,)), ) def test_rotate_preset(self, test_inputs, test_outputs): """Tests that the rotate function correctly rotates points.""" def func(test_point, test_angle): random_matrix = rotation_matrix_2d.from_euler(test_angle) return rotation_matrix_2d.rotate(test_point, random_matrix) self.assert_output_is_correct(func, test_inputs, test_outputs) if __name__ == "__main__": test_case.main()
-1
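
The rotation-matrix tests in this row exercise a small amount of math that is worth stating explicitly. The sketch below is a scalar NumPy illustration of the exact and small-angle constructions, not the library's batched `from_euler` API; shapes, validation, and broadcasting are omitted.

```python
import numpy as np


def from_euler(angle):
  # Exact 2d rotation matrix for an angle in radians.
  c, s = np.cos(angle), np.sin(angle)
  return np.array([[c, -s],
                   [s, c]])


def from_euler_small_angles(angle):
  # Second-order Taylor expansion: cos(a) ~ 1 - a**2 / 2, sin(a) ~ a.
  c = 1.0 - 0.5 * angle * angle
  return np.array([[c, -angle],
                   [angle, c]])
```

This also explains the empirical bound in `test_from_euler_with_small_angles_approximation_random`: at |angle| = 0.17 the leading truncation errors are about angle**3 / 6 ~= 8.2e-4 for the sine terms and angle**4 / 24 ~= 3.5e-5 for the cosine terms, both under the test's atol of 1e-3.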
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/rendering/kernels/rasterize_triangles_impl.cc
/* Copyright 2020 The TensorFlow Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "py/tensorflow_graphics/rendering/kernels/rasterize_triangles_impl.h" #include <algorithm> #include <cmath> namespace { using fixed_t = int64; // Converts to fixed point with 16 fractional bits and 48 integer bits. // TODO(fcole): fixed-point depth may be too shallow. // The algorithm requires multiplying two of the xyzw clip-space coordinates // together, summing, and then multiplying by an NDC pixel coordinate (three // total multiplies). After three multiplications, the fractional part will be // 48 bits, leaving 16 bits for the integer part. The NDC pixel coordinates // are in (-1,1) so they need only 1 integer bit, so as long as the values of // the inverse matrix are < 2^15, the fixed-point math should not overflow. This // seems a bit dicey but so far all the tests I've tried pass. constexpr int kFractionalBits = 16; constexpr fixed_t ShiftPointLeft(fixed_t x) { return x << kFractionalBits; } constexpr fixed_t ToFixedPoint(float f) { return static_cast<fixed_t>(f * ShiftPointLeft(1)); } // Takes the minimum of a and b, rounds down, and converts to an integer in // the range [low, high]. inline int ClampedIntegerMin(float a, float b, int low, int high) { const float value = std::floor(std::min(a, b)); return static_cast<int>( std::clamp(value, static_cast<float>(low), static_cast<float>(high))); } // Takes the maximum of a and b, rounds up, and converts to an integer in the // range [low, high]. inline int ClampedIntegerMax(float a, float b, int low, int high) { const float value = std::ceil(std::max(a, b)); return static_cast<int>( std::clamp(value, static_cast<float>(low), static_cast<float>(high))); } // Return true if the near plane is between the eye and the clip-space point // with the provided z and w. inline bool IsClipPointVisible(float z, float w) { return w > 0 && z >= -w; } // Computes the screen-space bounding box of the given clip-space triangle and // stores it into [left, right, bottom, top], where left and bottom limits are // inclusive while right and top are not. // Returns true if the bounding box includes any screen pixels. bool ComputeTriangleBoundingBox(float v0x, float v0y, float v0z, float v0w, float v1x, float v1y, float v1z, float v1w, float v2x, float v2y, float v2z, float v2w, int image_width, int image_height, int* left, int* right, int* bottom, int* top) { // If the triangle is entirely visible, project the vertices to pixel // coordinates and find the triangle bounding box enlarged to the nearest // integer and clamped to the image boundaries. If the triangle is not // entirely visible, intersect the edges that cross the near plane with the // near plane and use those to compute screen bounds instead. 
*left = image_width; *right = 0; *bottom = image_height; *top = 0; auto add_point = [&](float x, float y, float w) { const float px = 0.5f * (x / w + 1) * image_width; const float py = 0.5f * (y / w + 1) * image_height; *left = ClampedIntegerMin(*left, px, 0, image_width); *right = ClampedIntegerMax(*right, px, 0, image_width); *bottom = ClampedIntegerMin(*bottom, py, 0, image_height); *top = ClampedIntegerMax(*top, py, 0, image_height); }; auto add_near_point = [&](float x0, float y0, float z0, float w0, float x1, float y1, float z1, float w1) { const float denom = z0 - z1 + w0 - w1; if (denom != 0) { // Interpolate to near plane, where z/w == -1. const float t = (z0 + w0) / denom; const float x = x0 + t * (x1 - x0); const float y = y0 + t * (y1 - y0); const float w = w0 + t * (w1 - w0); add_point(x, y, w); } }; const bool visible_v0 = IsClipPointVisible(v0z, v0w); const bool visible_v1 = IsClipPointVisible(v1z, v1w); const bool visible_v2 = IsClipPointVisible(v2z, v2w); if (visible_v0) { add_point(v0x, v0y, v0w); if (!visible_v1) add_near_point(v0x, v0y, v0z, v0w, v1x, v1y, v1z, v1w); if (!visible_v2) add_near_point(v0x, v0y, v0z, v0w, v2x, v2y, v2z, v2w); } if (visible_v1) { add_point(v1x, v1y, v1w); if (!visible_v2) add_near_point(v1x, v1y, v1z, v1w, v2x, v2y, v2z, v2w); if (!visible_v0) add_near_point(v1x, v1y, v1z, v1w, v0x, v0y, v0z, v0w); } if (visible_v2) { add_point(v2x, v2y, v2w); if (!visible_v0) add_near_point(v2x, v2y, v2z, v2w, v0x, v0y, v0z, v0w); if (!visible_v1) add_near_point(v2x, v2y, v2z, v2w, v1x, v1y, v1z, v1w); } const bool is_valid = (*right > *left) && (*top > *bottom); return is_valid; } // Computes a 3x3 matrix inverse without dividing by the determinant. // Instead, makes an unnormalized matrix inverse with the correct sign // by flipping the sign of the matrix if the determinant is negative. // By leaving out determinant division, the rows of M^-1 only depend on two out // of three of the columns of M; i.e., the first row of M^-1 only depends on the // second and third columns of M, the second only depends on the first and // third, etc. This means we can compute edge functions for two neighboring // triangles independently and produce exactly the same numerical result up to // the sign. // See http://mathworld.wolfram.com/MatrixInverse.html // Culling is accomplished by inspecting the sign of the determinant as in: // "Incremental and Hierarchical Hilbert Order Edge Equation Polygon // Rasterization," McCool, et al., 2001 void ComputeUnnormalizedMatrixInverse( const fixed_t a11, const fixed_t a12, const fixed_t a13, const fixed_t a21, const fixed_t a22, const fixed_t a23, const fixed_t a31, const fixed_t a32, const fixed_t a33, const FaceCullingMode culling_mode, fixed_t m_inv[9]) { m_inv[0] = a22 * a33 - a32 * a23; m_inv[1] = a13 * a32 - a33 * a12; m_inv[2] = a12 * a23 - a22 * a13; m_inv[3] = a23 * a31 - a33 * a21; m_inv[4] = a11 * a33 - a31 * a13; m_inv[5] = a13 * a21 - a23 * a11; m_inv[6] = a21 * a32 - a31 * a22; m_inv[7] = a12 * a31 - a32 * a11; m_inv[8] = a11 * a22 - a21 * a12; // If the culling mode is kBack, leave the sign of the matrix unchanged. // Transfer the sign of the determinant if mode is kNone. If mode is kFront, // just invert the matrix. if (culling_mode == FaceCullingMode::kNone || culling_mode == FaceCullingMode::kFront) { // The first column of the unnormalized M^-1 contains intermediate values // for det(M). 
const float det = a11 * m_inv[0] + a12 * m_inv[3] + a13 * m_inv[6]; const float multiplier = (culling_mode == FaceCullingMode::kNone) ? std::copysign(1.0, det) : -1.0; for (int i = 0; i < 9; ++i) { m_inv[i] *= multiplier; } } } // Computes the edge functions from M^-1 as described by Olano and Greer, // "Triangle Scan Conversion using 2D Homogeneous Coordinates." // // This function combines equations (3) and (4). It first computes // [a b c] = u_i * M^-1, where u_0 = [1 0 0], u_1 = [0 1 0], etc., // then computes edge_i = aX + bY + c void ComputeEdgeFunctions(const float px, const float py, const fixed_t m_inv[9], fixed_t values[3]) { const fixed_t px_i = ToFixedPoint(px); const fixed_t py_i = ToFixedPoint(py); for (int i = 0; i < 3; ++i) { const fixed_t a = m_inv[3 * i + 0]; const fixed_t b = m_inv[3 * i + 1]; const fixed_t c = m_inv[3 * i + 2]; // Before summing, shift the point of c to align with the products of // multiplication. values[i] = a * px_i + b * py_i + ShiftPointLeft(c); } } // Determines whether the point p lies inside a triangle. Counts pixels exactly // on an edge as inside the triangle, as long as the triangle is not degenerate. // Degenerate (zero-area) triangles always fail the inside test. bool PixelIsInsideTriangle(const fixed_t edge_values[3]) { // Check that the edge values are all non-negative and that at least one is // positive (triangle is non-degenerate). return (edge_values[0] >= 0 && edge_values[1] >= 0 && edge_values[2] >= 0) && (edge_values[0] > 0 || edge_values[1] > 0 || edge_values[2] > 0); } } // namespace void RasterizeTrianglesImpl(const float* vertices, const int32* triangles, int32 triangle_count, int32 image_width, int32 image_height, int32 num_layers, FaceCullingMode face_culling_mode, int32* triangle_ids, float* z_buffer, float* barycentric_coordinates) { const float half_image_width = 0.5f * image_width; const float half_image_height = 0.5f * image_height; fixed_t unnormalized_matrix_inverse[9]; fixed_t b_over_w[3]; int left, right, bottom, top; for (int32 triangle_id = 0; triangle_id < triangle_count; ++triangle_id) { const int32 v0_x_id = 4 * triangles[3 * triangle_id]; const int32 v1_x_id = 4 * triangles[3 * triangle_id + 1]; const int32 v2_x_id = 4 * triangles[3 * triangle_id + 2]; const float v0x = vertices[v0_x_id]; const float v0y = vertices[v0_x_id + 1]; const float v0z = vertices[v0_x_id + 2]; const float v0w = vertices[v0_x_id + 3]; const float v1x = vertices[v1_x_id]; const float v1y = vertices[v1_x_id + 1]; const float v1z = vertices[v1_x_id + 2]; const float v1w = vertices[v1_x_id + 3]; const float v2x = vertices[v2_x_id]; const float v2y = vertices[v2_x_id + 1]; const float v2z = vertices[v2_x_id + 2]; const float v2w = vertices[v2_x_id + 3]; const bool is_valid = ComputeTriangleBoundingBox( v0x, v0y, v0z, v0w, v1x, v1y, v1z, v1w, v2x, v2y, v2z, v2w, image_width, image_height, &left, &right, &bottom, &top); // Ignore triangles that do not overlap with any screen pixels. if (!is_valid) continue; ComputeUnnormalizedMatrixInverse( ToFixedPoint(v0x), ToFixedPoint(v1x), ToFixedPoint(v2x), ToFixedPoint(v0y), ToFixedPoint(v1y), ToFixedPoint(v2y), ToFixedPoint(v0w), ToFixedPoint(v1w), ToFixedPoint(v2w), face_culling_mode, unnormalized_matrix_inverse); // Iterate over each pixel in the bounding box. 
for (int iy = bottom; iy < top; ++iy) { for (int ix = left; ix < right; ++ix) { const float px = ((ix + 0.5f) / half_image_width) - 1.0f; const float py = ((iy + 0.5f) / half_image_height) - 1.0f; ComputeEdgeFunctions(px, py, unnormalized_matrix_inverse, b_over_w); if (!PixelIsInsideTriangle(b_over_w)) { continue; } const float one_over_w = b_over_w[0] + b_over_w[1] + b_over_w[2]; const float b0 = b_over_w[0] / one_over_w; const float b1 = b_over_w[1] / one_over_w; const float b2 = b_over_w[2] / one_over_w; // Since we computed an unnormalized w above, we need to recompute // a properly scaled clip-space w value and then divide clip-space z // by that. const float clip_z = b0 * v0z + b1 * v1z + b2 * v2z; const float clip_w = b0 * v0w + b1 * v1w + b2 * v2w; const float z = clip_z / clip_w; // Skip the pixel if it is beyond the near or far clipping plane. if (z < -1.0f || z > 1.0f) continue; // Insert into appropriate depth layer with insertion sort. float z_next = z; int32 id_next = triangle_id; float b0_next = b0; float b1_next = b1; float b2_next = b2; const int pixel_idx0 = iy * image_width + ix; for (int layer = 0; layer < num_layers; ++layer) { const int pixel_idx = pixel_idx0 + image_height * image_width * layer; if (z_next < z_buffer[pixel_idx]) { std::swap(z_next, z_buffer[pixel_idx]); std::swap(id_next, triangle_ids[pixel_idx]); if (barycentric_coordinates != nullptr) { std::swap(b0_next, barycentric_coordinates[3 * pixel_idx + 0]); std::swap(b1_next, barycentric_coordinates[3 * pixel_idx + 1]); std::swap(b2_next, barycentric_coordinates[3 * pixel_idx + 2]); } } // Exit the loop early if the clear depth (z == 1) is reached. if (z_next == 1) break; } } } } }
/* Copyright 2020 The TensorFlow Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "py/tensorflow_graphics/rendering/kernels/rasterize_triangles_impl.h" #include <algorithm> #include <cmath> namespace { using fixed_t = int64; // Converts to fixed point with 16 fractional bits and 48 integer bits. // TODO(fcole): fixed-point depth may be too shallow. // The algorithm requires multiplying two of the xyzw clip-space coordinates // together, summing, and then multiplying by an NDC pixel coordinate (three // total multiplies). After three multiplications, the fractional part will be // 48 bits, leaving 16 bits for the integer part. The NDC pixel coordinates // are in (-1,1) so they need only 1 integer bit, so as long as the values of // the inverse matrix are < 2^15, the fixed-point math should not overflow. This // seems a bit dicey but so far all the tests I've tried pass. constexpr int kFractionalBits = 16; constexpr fixed_t ShiftPointLeft(fixed_t x) { return x << kFractionalBits; } constexpr fixed_t ToFixedPoint(float f) { return static_cast<fixed_t>(f * ShiftPointLeft(1)); } // Takes the minimum of a and b, rounds down, and converts to an integer in // the range [low, high]. inline int ClampedIntegerMin(float a, float b, int low, int high) { const float value = std::floor(std::min(a, b)); return static_cast<int>( std::clamp(value, static_cast<float>(low), static_cast<float>(high))); } // Takes the maximum of a and b, rounds up, and converts to an integer in the // range [low, high]. inline int ClampedIntegerMax(float a, float b, int low, int high) { const float value = std::ceil(std::max(a, b)); return static_cast<int>( std::clamp(value, static_cast<float>(low), static_cast<float>(high))); } // Return true if the near plane is between the eye and the clip-space point // with the provided z and w. inline bool IsClipPointVisible(float z, float w) { return w > 0 && z >= -w; } // Computes the screen-space bounding box of the given clip-space triangle and // stores it into [left, right, bottom, top], where left and bottom limits are // inclusive while right and top are not. // Returns true if the bounding box includes any screen pixels. bool ComputeTriangleBoundingBox(float v0x, float v0y, float v0z, float v0w, float v1x, float v1y, float v1z, float v1w, float v2x, float v2y, float v2z, float v2w, int image_width, int image_height, int* left, int* right, int* bottom, int* top) { // If the triangle is entirely visible, project the vertices to pixel // coordinates and find the triangle bounding box enlarged to the nearest // integer and clamped to the image boundaries. If the triangle is not // entirely visible, intersect the edges that cross the near plane with the // near plane and use those to compute screen bounds instead. 
*left = image_width; *right = 0; *bottom = image_height; *top = 0; auto add_point = [&](float x, float y, float w) { const float px = 0.5f * (x / w + 1) * image_width; const float py = 0.5f * (y / w + 1) * image_height; *left = ClampedIntegerMin(*left, px, 0, image_width); *right = ClampedIntegerMax(*right, px, 0, image_width); *bottom = ClampedIntegerMin(*bottom, py, 0, image_height); *top = ClampedIntegerMax(*top, py, 0, image_height); }; auto add_near_point = [&](float x0, float y0, float z0, float w0, float x1, float y1, float z1, float w1) { const float denom = z0 - z1 + w0 - w1; if (denom != 0) { // Interpolate to near plane, where z/w == -1. const float t = (z0 + w0) / denom; const float x = x0 + t * (x1 - x0); const float y = y0 + t * (y1 - y0); const float w = w0 + t * (w1 - w0); add_point(x, y, w); } }; const bool visible_v0 = IsClipPointVisible(v0z, v0w); const bool visible_v1 = IsClipPointVisible(v1z, v1w); const bool visible_v2 = IsClipPointVisible(v2z, v2w); if (visible_v0) { add_point(v0x, v0y, v0w); if (!visible_v1) add_near_point(v0x, v0y, v0z, v0w, v1x, v1y, v1z, v1w); if (!visible_v2) add_near_point(v0x, v0y, v0z, v0w, v2x, v2y, v2z, v2w); } if (visible_v1) { add_point(v1x, v1y, v1w); if (!visible_v2) add_near_point(v1x, v1y, v1z, v1w, v2x, v2y, v2z, v2w); if (!visible_v0) add_near_point(v1x, v1y, v1z, v1w, v0x, v0y, v0z, v0w); } if (visible_v2) { add_point(v2x, v2y, v2w); if (!visible_v0) add_near_point(v2x, v2y, v2z, v2w, v0x, v0y, v0z, v0w); if (!visible_v1) add_near_point(v2x, v2y, v2z, v2w, v1x, v1y, v1z, v1w); } const bool is_valid = (*right > *left) && (*top > *bottom); return is_valid; } // Computes a 3x3 matrix inverse without dividing by the determinant. // Instead, makes an unnormalized matrix inverse with the correct sign // by flipping the sign of the matrix if the determinant is negative. // By leaving out determinant division, the rows of M^-1 only depend on two out // of three of the columns of M; i.e., the first row of M^-1 only depends on the // second and third columns of M, the second only depends on the first and // third, etc. This means we can compute edge functions for two neighboring // triangles independently and produce exactly the same numerical result up to // the sign. // See http://mathworld.wolfram.com/MatrixInverse.html // Culling is accomplished by inspecting the sign of the determinant as in: // "Incremental and Hierarchical Hilbert Order Edge Equation Polygon // Rasterization," McCool, et al., 2001 void ComputeUnnormalizedMatrixInverse( const fixed_t a11, const fixed_t a12, const fixed_t a13, const fixed_t a21, const fixed_t a22, const fixed_t a23, const fixed_t a31, const fixed_t a32, const fixed_t a33, const FaceCullingMode culling_mode, fixed_t m_inv[9]) { m_inv[0] = a22 * a33 - a32 * a23; m_inv[1] = a13 * a32 - a33 * a12; m_inv[2] = a12 * a23 - a22 * a13; m_inv[3] = a23 * a31 - a33 * a21; m_inv[4] = a11 * a33 - a31 * a13; m_inv[5] = a13 * a21 - a23 * a11; m_inv[6] = a21 * a32 - a31 * a22; m_inv[7] = a12 * a31 - a32 * a11; m_inv[8] = a11 * a22 - a21 * a12; // If the culling mode is kBack, leave the sign of the matrix unchanged. // Transfer the sign of the determinant if mode is kNone. If mode is kFront, // just invert the matrix. if (culling_mode == FaceCullingMode::kNone || culling_mode == FaceCullingMode::kFront) { // The first column of the unnormalized M^-1 contains intermediate values // for det(M). 
const float det = a11 * m_inv[0] + a12 * m_inv[3] + a13 * m_inv[6]; const float multiplier = (culling_mode == FaceCullingMode::kNone) ? std::copysign(1.0, det) : -1.0; for (int i = 0; i < 9; ++i) { m_inv[i] *= multiplier; } } } // Computes the edge functions from M^-1 as described by Olano and Greer, // "Triangle Scan Conversion using 2D Homogeneous Coordinates." // // This function combines equations (3) and (4). It first computes // [a b c] = u_i * M^-1, where u_0 = [1 0 0], u_1 = [0 1 0], etc., // then computes edge_i = aX + bY + c void ComputeEdgeFunctions(const float px, const float py, const fixed_t m_inv[9], fixed_t values[3]) { const fixed_t px_i = ToFixedPoint(px); const fixed_t py_i = ToFixedPoint(py); for (int i = 0; i < 3; ++i) { const fixed_t a = m_inv[3 * i + 0]; const fixed_t b = m_inv[3 * i + 1]; const fixed_t c = m_inv[3 * i + 2]; // Before summing, shift the point of c to align with the products of // multiplication. values[i] = a * px_i + b * py_i + ShiftPointLeft(c); } } // Determines whether the point p lies inside a triangle. Counts pixels exactly // on an edge as inside the triangle, as long as the triangle is not degenerate. // Degenerate (zero-area) triangles always fail the inside test. bool PixelIsInsideTriangle(const fixed_t edge_values[3]) { // Check that the edge values are all non-negative and that at least one is // positive (triangle is non-degenerate). return (edge_values[0] >= 0 && edge_values[1] >= 0 && edge_values[2] >= 0) && (edge_values[0] > 0 || edge_values[1] > 0 || edge_values[2] > 0); } } // namespace void RasterizeTrianglesImpl(const float* vertices, const int32* triangles, int32 triangle_count, int32 image_width, int32 image_height, int32 num_layers, FaceCullingMode face_culling_mode, int32* triangle_ids, float* z_buffer, float* barycentric_coordinates) { const float half_image_width = 0.5f * image_width; const float half_image_height = 0.5f * image_height; fixed_t unnormalized_matrix_inverse[9]; fixed_t b_over_w[3]; int left, right, bottom, top; for (int32 triangle_id = 0; triangle_id < triangle_count; ++triangle_id) { const int32 v0_x_id = 4 * triangles[3 * triangle_id]; const int32 v1_x_id = 4 * triangles[3 * triangle_id + 1]; const int32 v2_x_id = 4 * triangles[3 * triangle_id + 2]; const float v0x = vertices[v0_x_id]; const float v0y = vertices[v0_x_id + 1]; const float v0z = vertices[v0_x_id + 2]; const float v0w = vertices[v0_x_id + 3]; const float v1x = vertices[v1_x_id]; const float v1y = vertices[v1_x_id + 1]; const float v1z = vertices[v1_x_id + 2]; const float v1w = vertices[v1_x_id + 3]; const float v2x = vertices[v2_x_id]; const float v2y = vertices[v2_x_id + 1]; const float v2z = vertices[v2_x_id + 2]; const float v2w = vertices[v2_x_id + 3]; const bool is_valid = ComputeTriangleBoundingBox( v0x, v0y, v0z, v0w, v1x, v1y, v1z, v1w, v2x, v2y, v2z, v2w, image_width, image_height, &left, &right, &bottom, &top); // Ignore triangles that do not overlap with any screen pixels. if (!is_valid) continue; ComputeUnnormalizedMatrixInverse( ToFixedPoint(v0x), ToFixedPoint(v1x), ToFixedPoint(v2x), ToFixedPoint(v0y), ToFixedPoint(v1y), ToFixedPoint(v2y), ToFixedPoint(v0w), ToFixedPoint(v1w), ToFixedPoint(v2w), face_culling_mode, unnormalized_matrix_inverse); // Iterate over each pixel in the bounding box. 
for (int iy = bottom; iy < top; ++iy) { for (int ix = left; ix < right; ++ix) { const float px = ((ix + 0.5f) / half_image_width) - 1.0f; const float py = ((iy + 0.5f) / half_image_height) - 1.0f; ComputeEdgeFunctions(px, py, unnormalized_matrix_inverse, b_over_w); if (!PixelIsInsideTriangle(b_over_w)) { continue; } const float one_over_w = b_over_w[0] + b_over_w[1] + b_over_w[2]; const float b0 = b_over_w[0] / one_over_w; const float b1 = b_over_w[1] / one_over_w; const float b2 = b_over_w[2] / one_over_w; // Since we computed an unnormalized w above, we need to recompute // a properly scaled clip-space w value and then divide clip-space z // by that. const float clip_z = b0 * v0z + b1 * v1z + b2 * v2z; const float clip_w = b0 * v0w + b1 * v1w + b2 * v2w; const float z = clip_z / clip_w; // Skip the pixel if it is beyond the near or far clipping plane. if (z < -1.0f || z > 1.0f) continue; // Insert into appropriate depth layer with insertion sort. float z_next = z; int32 id_next = triangle_id; float b0_next = b0; float b1_next = b1; float b2_next = b2; const int pixel_idx0 = iy * image_width + ix; for (int layer = 0; layer < num_layers; ++layer) { const int pixel_idx = pixel_idx0 + image_height * image_width * layer; if (z_next < z_buffer[pixel_idx]) { std::swap(z_next, z_buffer[pixel_idx]); std::swap(id_next, triangle_ids[pixel_idx]); if (barycentric_coordinates != nullptr) { std::swap(b0_next, barycentric_coordinates[3 * pixel_idx + 0]); std::swap(b1_next, barycentric_coordinates[3 * pixel_idx + 1]); std::swap(b2_next, barycentric_coordinates[3 * pixel_idx + 2]); } } // Exit the loop early if the clear depth (z == 1) is reached. if (z_next == 1) break; } } } } }
-1
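
The C++ kernel in this row rasterizes with homogeneous edge functions (Olano and Greer) evaluated in fixed point. As a reading aid, here is a floating-point NumPy sketch of the core per-pixel test, mirroring `ComputeUnnormalizedMatrixInverse`, `ComputeEdgeFunctions`, and `PixelIsInsideTriangle`. It is an assumption-laden simplification: it uses float math instead of 16-fractional-bit fixed point, assumes a non-degenerate triangle (the kernel handles degeneracy via the edge-value signs), and omits the face-culling sign flip.

```python
import numpy as np


def edge_functions(v0, v1, v2, px, py):
  # v0, v1, v2 are clip-space (x, y, w) per vertex; (px, py) is the pixel
  # center in NDC. Vertices form the columns of m, as in the kernel.
  m = np.column_stack([v0, v1, v2])
  # Adjugate = inv(m) * det(m). Skipping the division by det(m) keeps the
  # edge functions of a shared edge between neighboring triangles
  # numerically identical up to sign.
  adj = np.linalg.inv(m) * np.linalg.det(m)
  return adj @ np.array([px, py, 1.0])


def pixel_inside(edge_values):
  # All non-negative and at least one positive; zero-area triangles fail.
  return np.all(edge_values >= 0) and np.any(edge_values > 0)


def barycentrics(edge_values):
  # Perspective-correct barycentrics, as in the kernel's main loop:
  # b_i = edge_i / (edge_0 + edge_1 + edge_2).
  return edge_values / np.sum(edge_values)
```

The fixed-point representation in the kernel exists because this float version is not watertight: two triangles sharing an edge can disagree about a pixel under rounding, leaving cracks, whereas exact integer arithmetic makes the inside test consistent across the shared edge.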
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./.git/hooks/fsmonitor-watchman.sample
#!/usr/bin/perl use strict; use warnings; use IPC::Open2; # An example hook script to integrate Watchman # (https://facebook.github.io/watchman/) with git to speed up detecting # new and modified files. # # The hook is passed a version (currently 1) and a time in nanoseconds # formatted as a string and outputs to stdout all files that have been # modified since the given time. Paths must be relative to the root of # the working tree and separated by a single NUL. # # To enable this hook, rename this file to "query-watchman" and set # 'git config core.fsmonitor .git/hooks/query-watchman' # my ($version, $time) = @ARGV; # Check the hook interface version if ($version == 1) { # convert nanoseconds to seconds # subtract one second to make sure watchman will return all changes $time = int ($time / 1000000000) - 1; } else { die "Unsupported query-fsmonitor hook version '$version'.\n" . "Falling back to scanning...\n"; } my $git_work_tree; if ($^O =~ 'msys' || $^O =~ 'cygwin') { $git_work_tree = Win32::GetCwd(); $git_work_tree =~ tr/\\/\//; } else { require Cwd; $git_work_tree = Cwd::cwd(); } my $retry = 1; launch_watchman(); sub launch_watchman { my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j --no-pretty') or die "open2() failed: $!\n" . "Falling back to scanning...\n"; # In the query expression below we're asking for names of files that # changed since $time but were not transient (ie created after # $time but no longer exist). # # To accomplish this, we're using the "since" generator to use the # recency index to select candidate nodes and "fields" to limit the # output to file names only. my $query = <<" END"; ["query", "$git_work_tree", { "since": $time, "fields": ["name"] }] END print CHLD_IN $query; close CHLD_IN; my $response = do {local $/; <CHLD_OUT>}; die "Watchman: command returned no output.\n" . "Falling back to scanning...\n" if $response eq ""; die "Watchman: command returned invalid output: $response\n" . "Falling back to scanning...\n" unless $response =~ /^\{/; my $json_pkg; eval { require JSON::XS; $json_pkg = "JSON::XS"; 1; } or do { require JSON::PP; $json_pkg = "JSON::PP"; }; my $o = $json_pkg->new->utf8->decode($response); if ($retry > 0 and $o->{error} and $o->{error} =~ m/unable to resolve root .* directory (.*) is not watched/) { print STDERR "Adding '$git_work_tree' to watchman's watch list.\n"; $retry--; qx/watchman watch "$git_work_tree"/; die "Failed to make watchman watch '$git_work_tree'.\n" . "Falling back to scanning...\n" if $? != 0; # Watchman will always return all files on the first query so # return the fast "everything is dirty" flag to git and do the # Watchman query just to get it over with now so we won't pay # the cost in git to look up each individual file. print "/\0"; eval { launch_watchman() }; exit 0; } die "Watchman: $o->{error}.\n" . "Falling back to scanning...\n" if $o->{error}; binmode STDOUT, ":utf8"; local $, = "\0"; print @{$o->{files}}; }
#!/usr/bin/perl

use strict;
use warnings;
use IPC::Open2;

# An example hook script to integrate Watchman
# (https://facebook.github.io/watchman/) with git to speed up detecting
# new and modified files.
#
# The hook is passed a version (currently 1) and a time in nanoseconds
# formatted as a string and outputs to stdout all files that have been
# modified since the given time. Paths must be relative to the root of
# the working tree and separated by a single NUL.
#
# To enable this hook, rename this file to "query-watchman" and set
# 'git config core.fsmonitor .git/hooks/query-watchman'
#
my ($version, $time) = @ARGV;

# Check the hook interface version
if ($version == 1) {
    # convert nanoseconds to seconds
    # subtract one second to make sure watchman will return all changes
    $time = int ($time / 1000000000) - 1;
} else {
    die "Unsupported query-fsmonitor hook version '$version'.\n" .
        "Falling back to scanning...\n";
}

my $git_work_tree;
if ($^O =~ 'msys' || $^O =~ 'cygwin') {
    $git_work_tree = Win32::GetCwd();
    $git_work_tree =~ tr/\\/\//;
} else {
    require Cwd;
    $git_work_tree = Cwd::cwd();
}

my $retry = 1;

launch_watchman();

sub launch_watchman {

    my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j --no-pretty')
        or die "open2() failed: $!\n" .
               "Falling back to scanning...\n";

    # In the query expression below we're asking for names of files that
    # changed since $time but were not transient (ie created after
    # $time but no longer exist).
    #
    # To accomplish this, we're using the "since" generator to use the
    # recency index to select candidate nodes and "fields" to limit the
    # output to file names only.

    my $query = <<"    END";
        ["query", "$git_work_tree", {
            "since": $time,
            "fields": ["name"]
        }]
    END

    print CHLD_IN $query;
    close CHLD_IN;
    my $response = do {local $/; <CHLD_OUT>};

    die "Watchman: command returned no output.\n" .
        "Falling back to scanning...\n" if $response eq "";
    die "Watchman: command returned invalid output: $response\n" .
        "Falling back to scanning...\n" unless $response =~ /^\{/;

    my $json_pkg;
    eval {
        require JSON::XS;
        $json_pkg = "JSON::XS";
        1;
    } or do {
        require JSON::PP;
        $json_pkg = "JSON::PP";
    };

    my $o = $json_pkg->new->utf8->decode($response);

    if ($retry > 0 and $o->{error} and $o->{error} =~ m/unable to resolve root .* directory (.*) is not watched/) {
        print STDERR "Adding '$git_work_tree' to watchman's watch list.\n";
        $retry--;
        qx/watchman watch "$git_work_tree"/;
        die "Failed to make watchman watch '$git_work_tree'.\n" .
            "Falling back to scanning...\n" if $? != 0;

        # Watchman will always return all files on the first query so
        # return the fast "everything is dirty" flag to git and do the
        # Watchman query just to get it over with now so we won't pay
        # the cost in git to look up each individual file.
        print "/\0";
        eval { launch_watchman() };
        exit 0;
    }

    die "Watchman: $o->{error}.\n" .
        "Falling back to scanning...\n" if $o->{error};

    binmode STDOUT, ":utf8";
    local $, = "\0";
    print @{$o->{files}};
}
-1
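The record above bundles the rasterizer's per-pixel outputs (barycentrics, triangle indices, foreground mask) into one Framebuffer structure used by the rasterization backend. As a rough illustration of that design, here is a minimal sketch of such a container; the field names, shapes, and rank check are our assumptions for illustration, not the actual tensorflow_graphics definition:

# Illustrative sketch only: field names and checks are assumptions, not the
# actual tensorflow_graphics Framebuffer definition.
import dataclasses
import tensorflow as tf


@dataclasses.dataclass
class Framebuffer:
  """Bundles per-pixel rasterizer outputs that previously traveled separately."""
  barycentrics: tf.Tensor     # [batch, height, width, 3] barycentric weights.
  triangle_id: tf.Tensor      # [batch, height, width, 1] covering triangle index.
  foreground_mask: tf.Tensor  # [batch, height, width, 1] 1 where a triangle covers.

  def __post_init__(self):
    # All fields are assumed to share one batch dimension plus height,
    # width and channels, i.e. rank 4.
    for name in ("barycentrics", "triangle_id", "foreground_mask"):
      tensor = getattr(self, name)
      if tensor.shape.rank != 4:
        raise ValueError(
            f"'{name}' must be [batch, height, width, channels], "
            f"got rank {tensor.shape.rank}.")

Returning one Framebuffer instead of a loose tuple lets the backend validate shapes once and hand downstream shading code a single, documented object.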
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/geometry/transformation/tests/quaternion_test.py
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for quaternion."""

from absl.testing import flagsaver
from absl.testing import parameterized
import numpy as np
import tensorflow as tf

from tensorflow_graphics.geometry.transformation import axis_angle
from tensorflow_graphics.geometry.transformation import quaternion
from tensorflow_graphics.geometry.transformation import rotation_matrix_3d
from tensorflow_graphics.geometry.transformation.tests import test_data as td
from tensorflow_graphics.geometry.transformation.tests import test_helpers
from tensorflow_graphics.util import test_case


class QuaternionTest(test_case.TestCase):

  @parameterized.parameters(
      ((3,), (3,)),
      ((None, 3), (None, 3)),
  )
  def test_between_two_vectors_3d_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.between_two_vectors_3d,
                                        shapes)

  @parameterized.parameters(
      ("must have exactly 3 dimensions", (2,), (3,)),
      ("must have exactly 3 dimensions", (3,), (2,)),
  )
  def test_between_two_vectors_3d_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.between_two_vectors_3d,
                                    error_msg, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_between_two_vectors_3d_jacobian_random(self):
    """Tests the Jacobian of between_two_vectors_3d."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    x_1_init = np.random.random(tensor_shape + [3])
    x_2_init = np.random.random(tensor_shape + [3])
    self.assert_jacobian_is_correct_fn(
        quaternion.between_two_vectors_3d, [x_1_init, x_2_init], atol=1e-4)

  def test_between_two_vectors_3d_random(self):
    """Checks the extracted rotation between two 3d vectors."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    source = np.random.random(tensor_shape + [3]).astype(np.float32)
    target = np.random.random(tensor_shape + [3]).astype(np.float32)
    rotation = quaternion.between_two_vectors_3d(source, target)
    rec_target = quaternion.rotate(source, rotation)
    self.assertAllClose(
        tf.nn.l2_normalize(target, axis=-1),
        tf.nn.l2_normalize(rec_target, axis=-1))
    # Checks that resulting quaternions are normalized.
    self.assertAllEqual(
        quaternion.is_normalized(rotation), np.full(tensor_shape + [1], True))

  def test_between_two_vectors_3d_that_are_the_same(self):
    """Checks the extracted rotation between two identical 3d vectors."""
    source = np.random.random((1, 3))
    rotation = quaternion.between_two_vectors_3d(source, source)
    self.assertAllEqual([[0, 0, 0, 1]], rotation)

  def test_between_two_vectors_3d_that_are_collinear(self):
    """Checks the extracted rotation between two collinear 3d vectors."""
    axis = [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
    antiparallel_axis = [(0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
    source = np.multiply(axis, 10.)
    target = np.multiply(axis, -10.)
    rotation = quaternion.between_two_vectors_3d(source, target)
    rotation_pi = quaternion.from_axis_angle(antiparallel_axis,
                                             [[np.pi], [np.pi]])
    self.assertAllClose(rotation_pi, rotation)

  @parameterized.parameters(
      ((4,),),
      ((None, 4),),
  )
  def test_conjugate_exception_not_raised(self, *shape):
    """Tests that the shape exceptions of conjugate are not raised."""
    self.assert_exception_is_not_raised(quaternion.conjugate, shape)

  @parameterized.parameters(
      ("must have exactly 4 dimensions", (3,)),)
  def test_conjugate_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.conjugate, error_msg, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_conjugate_jacobian_preset(self):
    """Test the Jacobian of the conjugate function."""
    x_init = test_helpers.generate_preset_test_quaternions()
    self.assert_jacobian_is_correct_fn(quaternion.conjugate, [x_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_conjugate_jacobian_random(self):
    """Test the Jacobian of the conjugate function."""
    x_init = test_helpers.generate_random_test_quaternions()
    self.assert_jacobian_is_correct_fn(quaternion.conjugate, [x_init])

  @parameterized.parameters(
      ((3,), (1,)),
      ((None, 3), (None, 1)),
  )
  def test_from_axis_angle_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.from_axis_angle, shapes)

  @parameterized.parameters(
      ("must have exactly 3 dimensions", (2,), (1,)),
      ("must have exactly 1 dimensions", (3,), (2,)),
  )
  def test_from_axis_angle_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.from_axis_angle, error_msg,
                                    shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_axis_angle_jacobian_preset(self):
    """Test the Jacobian of the from_axis_angle function."""
    x_axis_init, x_angle_init = test_helpers.generate_preset_test_axis_angle()
    self.assert_jacobian_is_correct_fn(quaternion.from_axis_angle,
                                       [x_axis_init, x_angle_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_from_axis_angle_jacobian_random(self):
    """Test the Jacobian of the from_axis_angle function."""
    x_axis_init, x_angle_init = test_helpers.generate_random_test_axis_angle()
    self.assert_jacobian_is_correct_fn(quaternion.from_axis_angle,
                                       [x_axis_init, x_angle_init])

  def test_from_axis_angle_normalized_random(self):
    """Test that from_axis_angle produces normalized quaternions."""
    random_axis, random_angle = test_helpers.generate_random_test_axis_angle()
    random_quaternion = quaternion.from_axis_angle(random_axis, random_angle)
    self.assertAllEqual(
        quaternion.is_normalized(random_quaternion),
        np.ones(shape=random_angle.shape, dtype=bool))

  def test_from_axis_angle_random(self):
    """Tests converting an axis-angle to a quaternion."""
    random_euler_angles = test_helpers.generate_random_test_euler_angles()
    axis, angle = axis_angle.from_euler(random_euler_angles)
    groundtruth = rotation_matrix_3d.from_quaternion(
        quaternion.from_euler(random_euler_angles))
    prediction = rotation_matrix_3d.from_quaternion(
        quaternion.from_axis_angle(axis, angle))
    self.assertAllClose(groundtruth, prediction, rtol=1e-3)

  @parameterized.parameters(
      ((3,),),
      ((None, 3),),
  )
  def test_from_euler_exception_not_raised(self, *shape):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.from_euler, shape)

  @parameterized.parameters(
      ("must have exactly 3 dimensions", (4,)),)
  def test_from_euler_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.from_euler, error_msg, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_from_euler_jacobian_preset(self):
    """Test the Jacobian of the from_euler function."""
    x_init = test_helpers.generate_preset_test_euler_angles()
    self.assert_jacobian_is_correct_fn(quaternion.from_euler, [x_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_from_euler_jacobian_random(self):
    """Test the Jacobian of the from_euler function."""
    x_init = test_helpers.generate_random_test_euler_angles()
    self.assert_jacobian_is_correct_fn(quaternion.from_euler, [x_init])

  def test_from_euler_normalized_random(self):
    """Tests that quaternions.from_euler returns normalized quaternions."""
    random_euler_angles = test_helpers.generate_random_test_euler_angles()
    tensor_shape = random_euler_angles.shape[:-1]
    random_quaternion = quaternion.from_euler(random_euler_angles)
    self.assertAllEqual(
        quaternion.is_normalized(random_quaternion),
        np.ones(shape=tensor_shape + (1,), dtype=bool))

  def test_from_euler_random(self):
    """Tests that quaternions can be constructed from Euler angles."""
    random_euler_angles = test_helpers.generate_random_test_euler_angles()
    tensor_shape = random_euler_angles.shape[:-1]
    random_matrix = rotation_matrix_3d.from_euler(random_euler_angles)
    random_quaternion = quaternion.from_euler(random_euler_angles)
    random_point = np.random.normal(size=tensor_shape + (3,))
    rotated_with_matrix = rotation_matrix_3d.rotate(random_point, random_matrix)
    rotated_with_quaternion = quaternion.rotate(random_point, random_quaternion)
    self.assertAllClose(rotated_with_matrix, rotated_with_quaternion)

  @parameterized.parameters(
      ((3,),),
      ((None, 3),),
  )
  def test_from_euler_with_small_angles_approximation_exception_not_raised(
      self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(
        quaternion.from_euler_with_small_angles_approximation, shapes)

  @parameterized.parameters(
      ("must have exactly 3 dimensions", (4,)),)
  def test_from_euler_with_small_angles_approximation_exception_raised(
      self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(
        quaternion.from_euler_with_small_angles_approximation, error_msg,
        shape)

  def test_from_euler_with_small_angles_approximation_random(self):
    # Only generate small angles. For a test tolerance of 1e-3, 0.33 was found
    # empirically to be the range where the small angle approximation works.
    random_euler_angles = test_helpers.generate_random_test_euler_angles(
        min_angle=-0.33, max_angle=0.33)
    exact_quaternion = quaternion.from_euler(random_euler_angles)
    approximate_quaternion = (
        quaternion.from_euler_with_small_angles_approximation(
            random_euler_angles))
    self.assertAllClose(exact_quaternion, approximate_quaternion, atol=1e-3)

  @parameterized.parameters(
      ("must have a rank greater than 1", (3,)),
      ("must have exactly 3 dimensions", (4, 3)),
      ("must have exactly 3 dimensions", (3, 4)),
  )
  def test_from_rotation_matrix_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.from_rotation_matrix, error_msg,
                                    shape)

  @parameterized.parameters(
      ((3, 3),),
      ((None, 3, 3),),
  )
  def test_from_rotation_matrix_not_raised(self, *shape):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.from_rotation_matrix, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_from_rotation_matrix_jacobian_preset(self):
    """Test the Jacobian of the from_rotation_matrix function."""
    x_init = test_helpers.generate_preset_test_rotation_matrices_3d()
    x = tf.convert_to_tensor(value=x_init)
    y = quaternion.from_rotation_matrix(x)
    self.assert_jacobian_is_finite(x, x_init, y)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_from_rotation_matrix_jacobian_random(self):
    """Test the Jacobian of the from_rotation_matrix function."""
    x_init = test_helpers.generate_random_test_rotation_matrix_3d()
    self.assert_jacobian_is_finite_fn(quaternion.from_rotation_matrix,
                                      [x_init])

  def test_from_rotation_matrix_normalized_random(self):
    """Tests that from_rotation_matrix produces normalized quaternions."""
    random_matrix = test_helpers.generate_random_test_rotation_matrix_3d()
    random_quaternion = quaternion.from_rotation_matrix(random_matrix)
    self.assertAllEqual(
        quaternion.is_normalized(random_quaternion),
        np.ones(shape=random_matrix.shape[:-2] + (1,), dtype=bool))

  @parameterized.parameters(
      ((td.MAT_3D_ID,), (td.QUAT_ID,)),
      ((td.MAT_3D_X_45,), (td.QUAT_X_45,)),
      ((td.MAT_3D_Y_45,), (td.QUAT_Y_45,)),
      ((td.MAT_3D_Z_45,), (td.QUAT_Z_45,)),
      ((td.MAT_3D_X_90,), (td.QUAT_X_90,)),
      ((td.MAT_3D_Y_90,), (td.QUAT_Y_90,)),
      ((td.MAT_3D_Z_90,), (td.QUAT_Z_90,)),
      ((td.MAT_3D_X_180,), (td.QUAT_X_180,)),
      ((td.MAT_3D_Y_180,), (td.QUAT_Y_180,)),
      ((td.MAT_3D_Z_180,), (td.QUAT_Z_180,)),
  )
  def test_from_rotation_matrix_preset(self, test_inputs, test_outputs):
    self.assert_output_is_correct(quaternion.from_rotation_matrix, test_inputs,
                                  test_outputs)

  def test_from_rotation_matrix_random(self):
    """Tests that from_rotation_matrix produces the expected quaternions."""
    random_euler_angles = test_helpers.generate_random_test_euler_angles()
    random_rotation_matrix_3d = rotation_matrix_3d.from_euler(
        random_euler_angles)
    groundtruth = rotation_matrix_3d.from_quaternion(
        quaternion.from_euler(random_euler_angles))
    prediction = rotation_matrix_3d.from_quaternion(
        quaternion.from_rotation_matrix(random_rotation_matrix_3d))
    self.assertAllClose(groundtruth, prediction)

  @parameterized.parameters(
      ((4,),),
      ((None, 4),),
  )
  def test_inverse_exception_not_raised(self, *shape):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.inverse, shape)

  @parameterized.parameters(
      ("must have exactly 4 dimensions", (3,)),)
  def test_inverse_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.inverse, error_msg, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_inverse_jacobian_preset(self):
    """Test the Jacobian of the inverse function."""
    x_init = test_helpers.generate_preset_test_quaternions()
    self.assert_jacobian_is_correct_fn(quaternion.inverse, [x_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_inverse_jacobian_random(self):
    """Test the Jacobian of the inverse function."""
    x_init = test_helpers.generate_random_test_quaternions()
    self.assert_jacobian_is_correct_fn(quaternion.inverse, [x_init])

  def test_inverse_normalized_random(self):
    """Tests that the inverse function returns normalized quaternions."""
    random_quaternion = test_helpers.generate_random_test_quaternions()
    inverse_quaternion = quaternion.inverse(random_quaternion)
    self.assertAllEqual(
        quaternion.is_normalized(inverse_quaternion),
        np.ones(shape=random_quaternion.shape[:-1] + (1,), dtype=bool))

  def test_inverse_random(self):
    """Tests that multiplying with the inverse gives identity."""
    random_quaternion = test_helpers.generate_random_test_quaternions()
    inverse_quaternion = quaternion.inverse(random_quaternion)
    final_quaternion = quaternion.multiply(random_quaternion,
                                           inverse_quaternion)
    tensor_shape = random_quaternion.shape[:-1]
    identity_quaternion = np.array((0.0, 0.0, 0.0, 1.0), dtype=np.float32)
    identity_quaternion = np.tile(identity_quaternion, tensor_shape + (1,))
    self.assertAllClose(final_quaternion, identity_quaternion, rtol=1e-3)

  @parameterized.parameters(
      ((4,),),
      ((None, 4),),
  )
  def test_is_normalized_exception_not_raised(self, *shape):
    """Tests that the shape exceptions of is_normalized are not raised."""
    self.assert_exception_is_not_raised(quaternion.is_normalized, shape)

  @parameterized.parameters(
      ("must have exactly 4 dimensions", (1, 5)),)
  def test_is_normalized_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions of is_normalized are raised."""
    self.assert_exception_is_raised(quaternion.is_normalized, error_msg, shape)

  def test_is_normalized_random(self):
    """Tests that is_normalized works as intended."""
    random_quaternion = test_helpers.generate_random_test_quaternions()
    tensor_shape = random_quaternion.shape[:-1]
    unnormalized_random_quaternion = random_quaternion * 1.01
    quat = np.concatenate((random_quaternion, unnormalized_random_quaternion),
                          axis=0)
    mask = np.concatenate(
        (np.ones(shape=tensor_shape + (1,), dtype=bool),
         np.zeros(shape=tensor_shape + (1,), dtype=bool)),
        axis=0)
    is_normalized = quaternion.is_normalized(quat)
    self.assertAllEqual(mask, is_normalized)

  @parameterized.parameters(
      ((4,),),
      ((None, 4),),
  )
  def test_normalize_exception_not_raised(self, *shape):
    """Tests that the shape exceptions of normalize are not raised."""
    self.assert_exception_is_not_raised(quaternion.normalize, shape)

  @parameterized.parameters(
      ("must have exactly 4 dimensions", (1, 5)),)
  def test_normalize_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions of normalize are raised."""
    self.assert_exception_is_raised(quaternion.normalize, error_msg, shape)

  def test_normalize_random(self):
    """Tests that normalize works as intended."""
    random_quaternion = test_helpers.generate_random_test_quaternions()
    tensor_shape = random_quaternion.shape[:-1]
    unnormalized_random_quaternion = random_quaternion * 1.01
    quat = np.concatenate((random_quaternion, unnormalized_random_quaternion),
                          axis=0)
    mask = np.concatenate(
        (np.ones(shape=tensor_shape + (1,), dtype=bool),
         np.zeros(shape=tensor_shape + (1,), dtype=bool)),
        axis=0)
    is_normalized_before = quaternion.is_normalized(quat)
    normalized = quaternion.normalize(quat)
    is_normalized_after = quaternion.is_normalized(normalized)
    self.assertAllEqual(mask, is_normalized_before)
    self.assertAllEqual(is_normalized_after,
                        np.ones(shape=is_normalized_after.shape, dtype=bool))

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_normalize_jacobian_preset(self):
    """Test the Jacobian of the normalize function."""
    x_init = test_helpers.generate_preset_test_quaternions()
    self.assert_jacobian_is_correct_fn(quaternion.normalize, [x_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_normalize_jacobian_random(self):
    """Test the Jacobian of the normalize function."""
    tensor_dimensions = np.random.randint(low=1, high=3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
    x_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    self.assert_jacobian_is_correct_fn(quaternion.normalize, [x_init])

  @parameterized.parameters(
      ((4,), (4,)),
      ((None, 4), (None, 4)),
  )
  def test_multiply_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.multiply, shapes)

  @parameterized.parameters(
      ("must have exactly 4 dimensions", (3,), (4,)),
      ("must have exactly 4 dimensions", (4,), (3,)),
  )
  def test_multiply_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.multiply, error_msg, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_multiply_jacobian_preset(self):
    """Test the Jacobian of the multiply function."""
    x_1_init = test_helpers.generate_preset_test_quaternions()
    x_2_init = test_helpers.generate_preset_test_quaternions()
    self.assert_jacobian_is_correct_fn(quaternion.multiply,
                                       [x_1_init, x_2_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_multiply_jacobian_random(self):
    """Test the Jacobian of the multiply function."""
    tensor_dimensions = np.random.randint(low=1, high=3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
    x_1_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    x_2_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    self.assert_jacobian_is_correct_fn(quaternion.multiply,
                                       [x_1_init, x_2_init])

  def test_normalized_random_initializer_raised(self):
    """Tests that the shape exceptions are raised."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    with self.subTest(name="dtype"):
      with self.assertRaisesRegexp(ValueError, "'dtype' must be tf.float32."):
        tf.compat.v1.get_variable(
            "test_variable",
            shape=tensor_shape + [4],
            dtype=tf.uint8,
            initializer=quaternion.normalized_random_uniform_initializer(),
            use_resource=False)
    with self.subTest(name="shape"):
      with self.assertRaisesRegexp(ValueError,
                                   "Last dimension of 'shape' must be 4."):
        tf.compat.v1.get_variable(
            "test_variable",
            shape=tensor_shape + [3],
            dtype=tf.float32,
            initializer=quaternion.normalized_random_uniform_initializer(),
            use_resource=False)

  def test_normalized_random_uniform_initializer_is_normalized(self):
    """Tests normalized_random_uniform_initializer outputs are normalized."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    variable = tf.compat.v1.get_variable(
        "test_variable",
        shape=tensor_shape + [4],
        dtype=tf.float32,
        initializer=quaternion.normalized_random_uniform_initializer(),
        use_resource=False)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    value = self.evaluate(variable)
    norms = np.linalg.norm(value, axis=-1)
    ones = np.ones(tensor_shape)
    self.assertAllClose(norms, ones, rtol=1e-3)

  def test_normalized_random_uniform_is_normalized(self):
    """Tests that the normalized_random_uniform gives normalized quaternions."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    tensor = quaternion.normalized_random_uniform(tensor_shape)
    norms = tf.norm(tensor=tensor, axis=-1)
    ones = np.ones(tensor_shape)
    self.assertAllClose(norms, ones, rtol=1e-3)

  @parameterized.parameters(
      ((3,), (4,)),
      ((None, 3), (None, 4)),
  )
  def test_rotate_exception_not_raised(self, *shape):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.rotate, shape)

  @parameterized.parameters(
      ("must have exactly 3 dimensions", (2,), (4,)),
      ("must have exactly 4 dimensions", (3,), (2,)),
  )
  def test_rotate_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.rotate, error_msg, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_rotate_jacobian_preset(self):
    """Test the Jacobian of the rotate function."""
    x_matrix_init = test_helpers.generate_preset_test_quaternions()
    tensor_shape = x_matrix_init.shape[:-1] + (3,)
    x_point_init = np.random.uniform(size=tensor_shape)
    self.assert_jacobian_is_correct_fn(quaternion.rotate,
                                       [x_point_init, x_matrix_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_rotate_jacobian_random(self):
    """Test the Jacobian of the rotate function."""
    x_matrix_init = test_helpers.generate_random_test_quaternions()
    tensor_shape = x_matrix_init.shape[:-1] + (3,)
    x_point_init = np.random.uniform(size=tensor_shape)
    self.assert_jacobian_is_correct_fn(quaternion.rotate,
                                       [x_point_init, x_matrix_init])

  def test_rotate_random(self):
    """Tests the rotation using a quaternion vs a rotation matrix."""
    random_quaternion = test_helpers.generate_random_test_quaternions()
    tensor_shape = random_quaternion.shape[:-1]
    random_point = np.random.normal(size=tensor_shape + (3,))
    rotated_point_quaternion = quaternion.rotate(random_point,
                                                 random_quaternion)
    matrix = rotation_matrix_3d.from_quaternion(random_quaternion)
    rotated_point_matrix = rotation_matrix_3d.rotate(random_point, matrix)
    self.assertAllClose(
        rotated_point_matrix, rotated_point_quaternion, rtol=1e-3)

  @parameterized.parameters(
      ((td.QUAT_ID, td.QUAT_X_45), (np.pi / 4.0,)),
      ((td.QUAT_X_45, td.QUAT_ID), (np.pi / 4.0,)),
      ((td.QUAT_Y_90, td.QUAT_Y_180), (np.pi / 2.0,)),
      ((td.QUAT_X_180, td.QUAT_Z_180), (np.pi,)),
      ((td.QUAT_X_180, -1.0 * td.QUAT_Y_180), (np.pi,)),
      ((td.QUAT_X_180, td.QUAT_X_180), (0.0,)),
      ((td.QUAT_X_180, -1 * td.QUAT_X_180), (0.0,)),
      ((td.QUAT_X_90, td.QUAT_Y_90), (2 * np.pi / 3.0,)),
      ((np.array([0., 0., 0., 1]), np.array([0., 0., 0., 1])), (0.0,)),
  )
  def test_relative_angle(self, test_inputs, test_outputs):
    """Tests quaternion relative angle."""
    self.assert_output_is_correct(quaternion.relative_angle, test_inputs,
                                  test_outputs)

  @parameterized.parameters(
      ((4,), (4,)),
      ((None, 4), (None, 4)),
      ((None, None, 4), (None, None, 4)),
  )
  def test_relative_angle_not_raised(self, *shapes):
    """Tests that the shape exceptions of relative_angle are not raised."""
    self.assert_exception_is_not_raised(quaternion.relative_angle, shapes)

  @parameterized.parameters(
      ("must have exactly 4 dimensions", (3,), (4,)),
      ("must have exactly 4 dimensions", (4,), (3,)),
  )
  def test_relative_angle_raised(self, error_msg, *shape):
    """Tests that the shape exceptions of relative_angle are raised."""
    self.assert_exception_is_raised(quaternion.relative_angle, error_msg,
                                    shape)

  def test_valid_relative_angle_random(self):
    """Test the output is in valid range for relative_angle function."""
    tensor_dimensions = np.random.randint(low=1, high=3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
    x_1_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    x_2_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    x_1 = tf.convert_to_tensor(value=x_1_init)
    x_2 = tf.convert_to_tensor(value=x_2_init)
    y = quaternion.relative_angle(x_1, x_2)
    self.assertAllGreaterEqual(y, 0.0)
    self.assertAllLessEqual(y, np.pi)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_jacobian_relative_angle_random(self):
    """Test the Jacobian of the relative_angle function."""
    tensor_dimensions = np.random.randint(low=1, high=3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
    x_1_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    x_2_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    self.assert_jacobian_is_correct_fn(quaternion.relative_angle,
                                       [x_1_init, x_2_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_jacobian_relative_angle_preset(self):
    """Test the Jacobian of the relative_angle function."""
    x_1_init = test_helpers.generate_preset_test_quaternions()
    x_2_init = test_helpers.generate_preset_test_quaternions()
    # relative angle is not smooth near <q1, q2> = 1, which occurs for
    # certain preset test quaternions.
    self.assert_jacobian_is_finite_fn(quaternion.relative_angle,
                                      [x_1_init, x_2_init])


if __name__ == "__main__":
  test_case.main()
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for quaternion."""

from absl.testing import flagsaver
from absl.testing import parameterized
import numpy as np
import tensorflow as tf

from tensorflow_graphics.geometry.transformation import axis_angle
from tensorflow_graphics.geometry.transformation import quaternion
from tensorflow_graphics.geometry.transformation import rotation_matrix_3d
from tensorflow_graphics.geometry.transformation.tests import test_data as td
from tensorflow_graphics.geometry.transformation.tests import test_helpers
from tensorflow_graphics.util import test_case


class QuaternionTest(test_case.TestCase):

  @parameterized.parameters(
      ((3,), (3,)),
      ((None, 3), (None, 3)),
  )
  def test_between_two_vectors_3d_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.between_two_vectors_3d,
                                        shapes)

  @parameterized.parameters(
      ("must have exactly 3 dimensions", (2,), (3,)),
      ("must have exactly 3 dimensions", (3,), (2,)),
  )
  def test_between_two_vectors_3d_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.between_two_vectors_3d,
                                    error_msg, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_between_two_vectors_3d_jacobian_random(self):
    """Tests the Jacobian of between_two_vectors_3d."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    x_1_init = np.random.random(tensor_shape + [3])
    x_2_init = np.random.random(tensor_shape + [3])
    self.assert_jacobian_is_correct_fn(
        quaternion.between_two_vectors_3d, [x_1_init, x_2_init], atol=1e-4)

  def test_between_two_vectors_3d_random(self):
    """Checks the extracted rotation between two 3d vectors."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    source = np.random.random(tensor_shape + [3]).astype(np.float32)
    target = np.random.random(tensor_shape + [3]).astype(np.float32)
    rotation = quaternion.between_two_vectors_3d(source, target)
    rec_target = quaternion.rotate(source, rotation)
    self.assertAllClose(
        tf.nn.l2_normalize(target, axis=-1),
        tf.nn.l2_normalize(rec_target, axis=-1))
    # Checks that resulting quaternions are normalized.
    self.assertAllEqual(
        quaternion.is_normalized(rotation), np.full(tensor_shape + [1], True))

  def test_between_two_vectors_3d_that_are_the_same(self):
    """Checks the extracted rotation between two identical 3d vectors."""
    source = np.random.random((1, 3))
    rotation = quaternion.between_two_vectors_3d(source, source)
    self.assertAllEqual([[0, 0, 0, 1]], rotation)

  def test_between_two_vectors_3d_that_are_collinear(self):
    """Checks the extracted rotation between two collinear 3d vectors."""
    axis = [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
    antiparallel_axis = [(0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
    source = np.multiply(axis, 10.)
    target = np.multiply(axis, -10.)
    rotation = quaternion.between_two_vectors_3d(source, target)
    rotation_pi = quaternion.from_axis_angle(antiparallel_axis,
                                             [[np.pi], [np.pi]])
    self.assertAllClose(rotation_pi, rotation)

  @parameterized.parameters(
      ((4,),),
      ((None, 4),),
  )
  def test_conjugate_exception_not_raised(self, *shape):
    """Tests that the shape exceptions of conjugate are not raised."""
    self.assert_exception_is_not_raised(quaternion.conjugate, shape)

  @parameterized.parameters(
      ("must have exactly 4 dimensions", (3,)),)
  def test_conjugate_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.conjugate, error_msg, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_conjugate_jacobian_preset(self):
    """Test the Jacobian of the conjugate function."""
    x_init = test_helpers.generate_preset_test_quaternions()
    self.assert_jacobian_is_correct_fn(quaternion.conjugate, [x_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_conjugate_jacobian_random(self):
    """Test the Jacobian of the conjugate function."""
    x_init = test_helpers.generate_random_test_quaternions()
    self.assert_jacobian_is_correct_fn(quaternion.conjugate, [x_init])

  @parameterized.parameters(
      ((3,), (1,)),
      ((None, 3), (None, 1)),
  )
  def test_from_axis_angle_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.from_axis_angle, shapes)

  @parameterized.parameters(
      ("must have exactly 3 dimensions", (2,), (1,)),
      ("must have exactly 1 dimensions", (3,), (2,)),
  )
  def test_from_axis_angle_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.from_axis_angle, error_msg,
                                    shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_axis_angle_jacobian_preset(self):
    """Test the Jacobian of the from_axis_angle function."""
    x_axis_init, x_angle_init = test_helpers.generate_preset_test_axis_angle()
    self.assert_jacobian_is_correct_fn(quaternion.from_axis_angle,
                                       [x_axis_init, x_angle_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_from_axis_angle_jacobian_random(self):
    """Test the Jacobian of the from_axis_angle function."""
    x_axis_init, x_angle_init = test_helpers.generate_random_test_axis_angle()
    self.assert_jacobian_is_correct_fn(quaternion.from_axis_angle,
                                       [x_axis_init, x_angle_init])

  def test_from_axis_angle_normalized_random(self):
    """Test that from_axis_angle produces normalized quaternions."""
    random_axis, random_angle = test_helpers.generate_random_test_axis_angle()
    random_quaternion = quaternion.from_axis_angle(random_axis, random_angle)
    self.assertAllEqual(
        quaternion.is_normalized(random_quaternion),
        np.ones(shape=random_angle.shape, dtype=bool))

  def test_from_axis_angle_random(self):
    """Tests converting an axis-angle to a quaternion."""
    random_euler_angles = test_helpers.generate_random_test_euler_angles()
    axis, angle = axis_angle.from_euler(random_euler_angles)
    groundtruth = rotation_matrix_3d.from_quaternion(
        quaternion.from_euler(random_euler_angles))
    prediction = rotation_matrix_3d.from_quaternion(
        quaternion.from_axis_angle(axis, angle))
    self.assertAllClose(groundtruth, prediction, rtol=1e-3)

  @parameterized.parameters(
      ((3,),),
      ((None, 3),),
  )
  def test_from_euler_exception_not_raised(self, *shape):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.from_euler, shape)

  @parameterized.parameters(
      ("must have exactly 3 dimensions", (4,)),)
  def test_from_euler_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.from_euler, error_msg, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_from_euler_jacobian_preset(self):
    """Test the Jacobian of the from_euler function."""
    x_init = test_helpers.generate_preset_test_euler_angles()
    self.assert_jacobian_is_correct_fn(quaternion.from_euler, [x_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_from_euler_jacobian_random(self):
    """Test the Jacobian of the from_euler function."""
    x_init = test_helpers.generate_random_test_euler_angles()
    self.assert_jacobian_is_correct_fn(quaternion.from_euler, [x_init])

  def test_from_euler_normalized_random(self):
    """Tests that quaternions.from_euler returns normalized quaternions."""
    random_euler_angles = test_helpers.generate_random_test_euler_angles()
    tensor_shape = random_euler_angles.shape[:-1]
    random_quaternion = quaternion.from_euler(random_euler_angles)
    self.assertAllEqual(
        quaternion.is_normalized(random_quaternion),
        np.ones(shape=tensor_shape + (1,), dtype=bool))

  def test_from_euler_random(self):
    """Tests that quaternions can be constructed from Euler angles."""
    random_euler_angles = test_helpers.generate_random_test_euler_angles()
    tensor_shape = random_euler_angles.shape[:-1]
    random_matrix = rotation_matrix_3d.from_euler(random_euler_angles)
    random_quaternion = quaternion.from_euler(random_euler_angles)
    random_point = np.random.normal(size=tensor_shape + (3,))
    rotated_with_matrix = rotation_matrix_3d.rotate(random_point, random_matrix)
    rotated_with_quaternion = quaternion.rotate(random_point, random_quaternion)
    self.assertAllClose(rotated_with_matrix, rotated_with_quaternion)

  @parameterized.parameters(
      ((3,),),
      ((None, 3),),
  )
  def test_from_euler_with_small_angles_approximation_exception_not_raised(
      self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(
        quaternion.from_euler_with_small_angles_approximation, shapes)

  @parameterized.parameters(
      ("must have exactly 3 dimensions", (4,)),)
  def test_from_euler_with_small_angles_approximation_exception_raised(
      self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(
        quaternion.from_euler_with_small_angles_approximation, error_msg,
        shape)

  def test_from_euler_with_small_angles_approximation_random(self):
    # Only generate small angles. For a test tolerance of 1e-3, 0.33 was found
    # empirically to be the range where the small angle approximation works.
    random_euler_angles = test_helpers.generate_random_test_euler_angles(
        min_angle=-0.33, max_angle=0.33)
    exact_quaternion = quaternion.from_euler(random_euler_angles)
    approximate_quaternion = (
        quaternion.from_euler_with_small_angles_approximation(
            random_euler_angles))
    self.assertAllClose(exact_quaternion, approximate_quaternion, atol=1e-3)

  @parameterized.parameters(
      ("must have a rank greater than 1", (3,)),
      ("must have exactly 3 dimensions", (4, 3)),
      ("must have exactly 3 dimensions", (3, 4)),
  )
  def test_from_rotation_matrix_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.from_rotation_matrix, error_msg,
                                    shape)

  @parameterized.parameters(
      ((3, 3),),
      ((None, 3, 3),),
  )
  def test_from_rotation_matrix_not_raised(self, *shape):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.from_rotation_matrix, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_from_rotation_matrix_jacobian_preset(self):
    """Test the Jacobian of the from_rotation_matrix function."""
    x_init = test_helpers.generate_preset_test_rotation_matrices_3d()
    x = tf.convert_to_tensor(value=x_init)
    y = quaternion.from_rotation_matrix(x)
    self.assert_jacobian_is_finite(x, x_init, y)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_from_rotation_matrix_jacobian_random(self):
    """Test the Jacobian of the from_rotation_matrix function."""
    x_init = test_helpers.generate_random_test_rotation_matrix_3d()
    self.assert_jacobian_is_finite_fn(quaternion.from_rotation_matrix,
                                      [x_init])

  def test_from_rotation_matrix_normalized_random(self):
    """Tests that from_rotation_matrix produces normalized quaternions."""
    random_matrix = test_helpers.generate_random_test_rotation_matrix_3d()
    random_quaternion = quaternion.from_rotation_matrix(random_matrix)
    self.assertAllEqual(
        quaternion.is_normalized(random_quaternion),
        np.ones(shape=random_matrix.shape[:-2] + (1,), dtype=bool))

  @parameterized.parameters(
      ((td.MAT_3D_ID,), (td.QUAT_ID,)),
      ((td.MAT_3D_X_45,), (td.QUAT_X_45,)),
      ((td.MAT_3D_Y_45,), (td.QUAT_Y_45,)),
      ((td.MAT_3D_Z_45,), (td.QUAT_Z_45,)),
      ((td.MAT_3D_X_90,), (td.QUAT_X_90,)),
      ((td.MAT_3D_Y_90,), (td.QUAT_Y_90,)),
      ((td.MAT_3D_Z_90,), (td.QUAT_Z_90,)),
      ((td.MAT_3D_X_180,), (td.QUAT_X_180,)),
      ((td.MAT_3D_Y_180,), (td.QUAT_Y_180,)),
      ((td.MAT_3D_Z_180,), (td.QUAT_Z_180,)),
  )
  def test_from_rotation_matrix_preset(self, test_inputs, test_outputs):
    self.assert_output_is_correct(quaternion.from_rotation_matrix, test_inputs,
                                  test_outputs)

  def test_from_rotation_matrix_random(self):
    """Tests that from_rotation_matrix produces the expected quaternions."""
    random_euler_angles = test_helpers.generate_random_test_euler_angles()
    random_rotation_matrix_3d = rotation_matrix_3d.from_euler(
        random_euler_angles)
    groundtruth = rotation_matrix_3d.from_quaternion(
        quaternion.from_euler(random_euler_angles))
    prediction = rotation_matrix_3d.from_quaternion(
        quaternion.from_rotation_matrix(random_rotation_matrix_3d))
    self.assertAllClose(groundtruth, prediction)

  @parameterized.parameters(
      ((4,),),
      ((None, 4),),
  )
  def test_inverse_exception_not_raised(self, *shape):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.inverse, shape)

  @parameterized.parameters(
      ("must have exactly 4 dimensions", (3,)),)
  def test_inverse_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.inverse, error_msg, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_inverse_jacobian_preset(self):
    """Test the Jacobian of the inverse function."""
    x_init = test_helpers.generate_preset_test_quaternions()
    self.assert_jacobian_is_correct_fn(quaternion.inverse, [x_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_inverse_jacobian_random(self):
    """Test the Jacobian of the inverse function."""
    x_init = test_helpers.generate_random_test_quaternions()
    self.assert_jacobian_is_correct_fn(quaternion.inverse, [x_init])

  def test_inverse_normalized_random(self):
    """Tests that the inverse function returns normalized quaternions."""
    random_quaternion = test_helpers.generate_random_test_quaternions()
    inverse_quaternion = quaternion.inverse(random_quaternion)
    self.assertAllEqual(
        quaternion.is_normalized(inverse_quaternion),
        np.ones(shape=random_quaternion.shape[:-1] + (1,), dtype=bool))

  def test_inverse_random(self):
    """Tests that multiplying with the inverse gives identity."""
    random_quaternion = test_helpers.generate_random_test_quaternions()
    inverse_quaternion = quaternion.inverse(random_quaternion)
    final_quaternion = quaternion.multiply(random_quaternion,
                                           inverse_quaternion)
    tensor_shape = random_quaternion.shape[:-1]
    identity_quaternion = np.array((0.0, 0.0, 0.0, 1.0), dtype=np.float32)
    identity_quaternion = np.tile(identity_quaternion, tensor_shape + (1,))
    self.assertAllClose(final_quaternion, identity_quaternion, rtol=1e-3)

  @parameterized.parameters(
      ((4,),),
      ((None, 4),),
  )
  def test_is_normalized_exception_not_raised(self, *shape):
    """Tests that the shape exceptions of is_normalized are not raised."""
    self.assert_exception_is_not_raised(quaternion.is_normalized, shape)

  @parameterized.parameters(
      ("must have exactly 4 dimensions", (1, 5)),)
  def test_is_normalized_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions of is_normalized are raised."""
    self.assert_exception_is_raised(quaternion.is_normalized, error_msg, shape)

  def test_is_normalized_random(self):
    """Tests that is_normalized works as intended."""
    random_quaternion = test_helpers.generate_random_test_quaternions()
    tensor_shape = random_quaternion.shape[:-1]
    unnormalized_random_quaternion = random_quaternion * 1.01
    quat = np.concatenate((random_quaternion, unnormalized_random_quaternion),
                          axis=0)
    mask = np.concatenate(
        (np.ones(shape=tensor_shape + (1,), dtype=bool),
         np.zeros(shape=tensor_shape + (1,), dtype=bool)),
        axis=0)
    is_normalized = quaternion.is_normalized(quat)
    self.assertAllEqual(mask, is_normalized)

  @parameterized.parameters(
      ((4,),),
      ((None, 4),),
  )
  def test_normalize_exception_not_raised(self, *shape):
    """Tests that the shape exceptions of normalize are not raised."""
    self.assert_exception_is_not_raised(quaternion.normalize, shape)

  @parameterized.parameters(
      ("must have exactly 4 dimensions", (1, 5)),)
  def test_normalize_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions of normalize are raised."""
    self.assert_exception_is_raised(quaternion.normalize, error_msg, shape)

  def test_normalize_random(self):
    """Tests that normalize works as intended."""
    random_quaternion = test_helpers.generate_random_test_quaternions()
    tensor_shape = random_quaternion.shape[:-1]
    unnormalized_random_quaternion = random_quaternion * 1.01
    quat = np.concatenate((random_quaternion, unnormalized_random_quaternion),
                          axis=0)
    mask = np.concatenate(
        (np.ones(shape=tensor_shape + (1,), dtype=bool),
         np.zeros(shape=tensor_shape + (1,), dtype=bool)),
        axis=0)
    is_normalized_before = quaternion.is_normalized(quat)
    normalized = quaternion.normalize(quat)
    is_normalized_after = quaternion.is_normalized(normalized)
    self.assertAllEqual(mask, is_normalized_before)
    self.assertAllEqual(is_normalized_after,
                        np.ones(shape=is_normalized_after.shape, dtype=bool))

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_normalize_jacobian_preset(self):
    """Test the Jacobian of the normalize function."""
    x_init = test_helpers.generate_preset_test_quaternions()
    self.assert_jacobian_is_correct_fn(quaternion.normalize, [x_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_normalize_jacobian_random(self):
    """Test the Jacobian of the normalize function."""
    tensor_dimensions = np.random.randint(low=1, high=3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
    x_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    self.assert_jacobian_is_correct_fn(quaternion.normalize, [x_init])

  @parameterized.parameters(
      ((4,), (4,)),
      ((None, 4), (None, 4)),
  )
  def test_multiply_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.multiply, shapes)

  @parameterized.parameters(
      ("must have exactly 4 dimensions", (3,), (4,)),
      ("must have exactly 4 dimensions", (4,), (3,)),
  )
  def test_multiply_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.multiply, error_msg, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_multiply_jacobian_preset(self):
    """Test the Jacobian of the multiply function."""
    x_1_init = test_helpers.generate_preset_test_quaternions()
    x_2_init = test_helpers.generate_preset_test_quaternions()
    self.assert_jacobian_is_correct_fn(quaternion.multiply,
                                       [x_1_init, x_2_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_multiply_jacobian_random(self):
    """Test the Jacobian of the multiply function."""
    tensor_dimensions = np.random.randint(low=1, high=3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
    x_1_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    x_2_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    self.assert_jacobian_is_correct_fn(quaternion.multiply,
                                       [x_1_init, x_2_init])

  def test_normalized_random_initializer_raised(self):
    """Tests that the shape exceptions are raised."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    with self.subTest(name="dtype"):
      with self.assertRaisesRegexp(ValueError, "'dtype' must be tf.float32."):
        tf.compat.v1.get_variable(
            "test_variable",
            shape=tensor_shape + [4],
            dtype=tf.uint8,
            initializer=quaternion.normalized_random_uniform_initializer(),
            use_resource=False)
    with self.subTest(name="shape"):
      with self.assertRaisesRegexp(ValueError,
                                   "Last dimension of 'shape' must be 4."):
        tf.compat.v1.get_variable(
            "test_variable",
            shape=tensor_shape + [3],
            dtype=tf.float32,
            initializer=quaternion.normalized_random_uniform_initializer(),
            use_resource=False)

  def test_normalized_random_uniform_initializer_is_normalized(self):
    """Tests normalized_random_uniform_initializer outputs are normalized."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    variable = tf.compat.v1.get_variable(
        "test_variable",
        shape=tensor_shape + [4],
        dtype=tf.float32,
        initializer=quaternion.normalized_random_uniform_initializer(),
        use_resource=False)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    value = self.evaluate(variable)
    norms = np.linalg.norm(value, axis=-1)
    ones = np.ones(tensor_shape)
    self.assertAllClose(norms, ones, rtol=1e-3)

  def test_normalized_random_uniform_is_normalized(self):
    """Tests that the normalized_random_uniform gives normalized quaternions."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
    tensor = quaternion.normalized_random_uniform(tensor_shape)
    norms = tf.norm(tensor=tensor, axis=-1)
    ones = np.ones(tensor_shape)
    self.assertAllClose(norms, ones, rtol=1e-3)

  @parameterized.parameters(
      ((3,), (4,)),
      ((None, 3), (None, 4)),
  )
  def test_rotate_exception_not_raised(self, *shape):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(quaternion.rotate, shape)

  @parameterized.parameters(
      ("must have exactly 3 dimensions", (2,), (4,)),
      ("must have exactly 4 dimensions", (3,), (2,)),
  )
  def test_rotate_exception_raised(self, error_msg, *shape):
    """Tests that the shape exceptions are raised."""
    self.assert_exception_is_raised(quaternion.rotate, error_msg, shape)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_rotate_jacobian_preset(self):
    """Test the Jacobian of the rotate function."""
    x_matrix_init = test_helpers.generate_preset_test_quaternions()
    tensor_shape = x_matrix_init.shape[:-1] + (3,)
    x_point_init = np.random.uniform(size=tensor_shape)
    self.assert_jacobian_is_correct_fn(quaternion.rotate,
                                       [x_point_init, x_matrix_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_rotate_jacobian_random(self):
    """Test the Jacobian of the rotate function."""
    x_matrix_init = test_helpers.generate_random_test_quaternions()
    tensor_shape = x_matrix_init.shape[:-1] + (3,)
    x_point_init = np.random.uniform(size=tensor_shape)
    self.assert_jacobian_is_correct_fn(quaternion.rotate,
                                       [x_point_init, x_matrix_init])

  def test_rotate_random(self):
    """Tests the rotation using a quaternion vs a rotation matrix."""
    random_quaternion = test_helpers.generate_random_test_quaternions()
    tensor_shape = random_quaternion.shape[:-1]
    random_point = np.random.normal(size=tensor_shape + (3,))
    rotated_point_quaternion = quaternion.rotate(random_point,
                                                 random_quaternion)
    matrix = rotation_matrix_3d.from_quaternion(random_quaternion)
    rotated_point_matrix = rotation_matrix_3d.rotate(random_point, matrix)
    self.assertAllClose(
        rotated_point_matrix, rotated_point_quaternion, rtol=1e-3)

  @parameterized.parameters(
      ((td.QUAT_ID, td.QUAT_X_45), (np.pi / 4.0,)),
      ((td.QUAT_X_45, td.QUAT_ID), (np.pi / 4.0,)),
      ((td.QUAT_Y_90, td.QUAT_Y_180), (np.pi / 2.0,)),
      ((td.QUAT_X_180, td.QUAT_Z_180), (np.pi,)),
      ((td.QUAT_X_180, -1.0 * td.QUAT_Y_180), (np.pi,)),
      ((td.QUAT_X_180, td.QUAT_X_180), (0.0,)),
      ((td.QUAT_X_180, -1 * td.QUAT_X_180), (0.0,)),
      ((td.QUAT_X_90, td.QUAT_Y_90), (2 * np.pi / 3.0,)),
      ((np.array([0., 0., 0., 1]), np.array([0., 0., 0., 1])), (0.0,)),
  )
  def test_relative_angle(self, test_inputs, test_outputs):
    """Tests quaternion relative angle."""
    self.assert_output_is_correct(quaternion.relative_angle, test_inputs,
                                  test_outputs)

  @parameterized.parameters(
      ((4,), (4,)),
      ((None, 4), (None, 4)),
      ((None, None, 4), (None, None, 4)),
  )
  def test_relative_angle_not_raised(self, *shapes):
    """Tests that the shape exceptions of relative_angle are not raised."""
    self.assert_exception_is_not_raised(quaternion.relative_angle, shapes)

  @parameterized.parameters(
      ("must have exactly 4 dimensions", (3,), (4,)),
      ("must have exactly 4 dimensions", (4,), (3,)),
  )
  def test_relative_angle_raised(self, error_msg, *shape):
    """Tests that the shape exceptions of relative_angle are raised."""
    self.assert_exception_is_raised(quaternion.relative_angle, error_msg,
                                    shape)

  def test_valid_relative_angle_random(self):
    """Test the output is in valid range for relative_angle function."""
    tensor_dimensions = np.random.randint(low=1, high=3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
    x_1_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    x_2_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    x_1 = tf.convert_to_tensor(value=x_1_init)
    x_2 = tf.convert_to_tensor(value=x_2_init)
    y = quaternion.relative_angle(x_1, x_2)
    self.assertAllGreaterEqual(y, 0.0)
    self.assertAllLessEqual(y, np.pi)

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_jacobian_relative_angle_random(self):
    """Test the Jacobian of the relative_angle function."""
    tensor_dimensions = np.random.randint(low=1, high=3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
    x_1_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    x_2_init = test_helpers.generate_random_test_quaternions(tensor_shape)
    self.assert_jacobian_is_correct_fn(quaternion.relative_angle,
                                       [x_1_init, x_2_init])

  @flagsaver.flagsaver(tfg_add_asserts_to_graph=False)
  def test_jacobian_relative_angle_preset(self):
    """Test the Jacobian of the relative_angle function."""
    x_1_init = test_helpers.generate_preset_test_quaternions()
    x_2_init = test_helpers.generate_preset_test_quaternions()
    # relative angle is not smooth near <q1, q2> = 1, which occurs for
    # certain preset test quaternions.
    self.assert_jacobian_is_finite_fn(quaternion.relative_angle,
                                      [x_1_init, x_2_init])


if __name__ == "__main__":
  test_case.main()
-1
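The collinear-vectors test in the record above expects a pi rotation about an axis perpendicular to the inputs. That expectation follows from the scalar-last axis-angle parameterization q = [axis * sin(theta / 2), cos(theta / 2)] these tests rely on (the identity is [0, 0, 0, 1]). A small NumPy sketch reproducing the expected value independently of the library under test; the helper name is ours, for illustration:

import numpy as np


def quaternion_from_axis_angle(axis, angle):
  # Scalar-last [x, y, z, w] convention, matching the identity quaternion
  # [0, 0, 0, 1] used in the tests above.
  axis = np.asarray(axis, dtype=np.float64)
  half_angle = 0.5 * angle
  return np.concatenate([axis * np.sin(half_angle), [np.cos(half_angle)]])


# Mapping +x onto -x is a pi rotation about any perpendicular axis, e.g. y:
print(np.round(quaternion_from_axis_angle([0.0, 1.0, 0.0], np.pi), 6))
# -> [0. 1. 0. 0.]: sin(pi/2) = 1 on the y component, cos(pi/2) = 0 scalar.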
tensorflow/graphics
488
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
copybara-service[bot]
"2021-01-30T00:32:55Z"
"2021-02-02T20:48:00Z"
e539c142799936d76d84d0861951ed883a9b4673
9d257ad4a72ccf65e4349910b9fff7c0a5648073
Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.. Gather barycentrics, triangle indices, mask, etc. into single structure - Framebuffer and use it in rasterization backend.
./tensorflow_graphics/util/doc.py
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Query environment variable for documentation building."""

import os


def _import_tfg_docs():
  """Checks if __init__.py imports should be executed (for building docs)."""
  return os.getenv("TFG_DOC_IMPORTS", "0") == "1"


def enable_tfg_doc_imports():
  """Re-enables the imports in the __init__.py so that docs can be built."""
  os.environ["TFG_DOC_IMPORTS"] = "1"
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Query environment variable for documentation building."""

import os


def _import_tfg_docs():
  """Checks if __init__.py imports should be executed (for building docs)."""
  return os.getenv("TFG_DOC_IMPORTS", "0") == "1"


def enable_tfg_doc_imports():
  """Re-enables the imports in the __init__.py so that docs can be built."""
  os.environ["TFG_DOC_IMPORTS"] = "1"
-1
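The doc.py record above gates documentation-only imports behind the TFG_DOC_IMPORTS environment variable. A hypothetical __init__.py consuming these helpers could look like the sketch below; the gated submodule is an illustrative assumption, not the package's actual import list:

# Hypothetical __init__.py; the gated import below is illustrative only.
from tensorflow_graphics.util import doc as _doc

if _doc._import_tfg_docs():
  # Heavy submodule imports run only while documentation is being built.
  from tensorflow_graphics.geometry import transformation  # noqa: F401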