Dataset columns: docstring (string, length 52 to 499), function (string, length 67 to 35.2k), __index_level_0__ (int64, 52.6k to 1.16M).
Initialize a parameter range. Args: min_value (float or int): The minimum value for the range. max_value (float or int): The maximum value for the range. scaling_type (str): The scale used for searching the range during tuning (default: 'Auto'). Valid values: 'Auto', 'Linear', 'Logarithmic' and 'ReverseLogarithmic'.
def __init__(self, min_value, max_value, scaling_type='Auto'):
    self.min_value = min_value
    self.max_value = max_value
    self.scaling_type = scaling_type
100,578
Represent the parameter range as a dictionary suitable for a request to create an Amazon SageMaker hyperparameter tuning job. Args: name (str): The name of the hyperparameter. Returns: dict[str, str]: A dictionary that contains the name and values of the hyperparameter.
def as_tuning_range(self, name):
    return {'Name': name,
            'MinValue': to_str(self.min_value),
            'MaxValue': to_str(self.max_value),
            'ScalingType': self.scaling_type}
100,579
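A brief usage sketch of the range-to-request conversion above. It assumes a ContinuousParameter-style subclass of this parameter range; the class name and the hyperparameter name are assumptions, not taken from the source.

# Hedged sketch: 'ContinuousParameter' and 'learning_rate' are assumed names.
lr_range = ContinuousParameter(0.001, 0.1, scaling_type='Logarithmic')
lr_range.as_tuning_range('learning_rate')
# -> {'Name': 'learning_rate', 'MinValue': '0.001', 'MaxValue': '0.1',
#     'ScalingType': 'Logarithmic'}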
Initialize a ``CategoricalParameter``. Args: values (list or object): The possible values for the hyperparameter. This input will be converted into a list of strings.
def __init__(self, values):  # pylint: disable=super-init-not-called
    if isinstance(values, list):
        self.values = [to_str(v) for v in values]
    else:
        self.values = [to_str(values)]
100,580
Return the role ARN whose credentials are used to call the API. Throws an exception if the current AWS identity is not a role. Args: sagemaker_session (Session): Current SageMaker session. Returns: (str): The role ARN.
def get_execution_role(sagemaker_session=None):
    if not sagemaker_session:
        sagemaker_session = Session()
    arn = sagemaker_session.get_caller_identity_arn()

    if ':role/' in arn:
        return arn
    message = 'The current AWS identity is not a role: {}, therefore it cannot be used as a SageMaker execution role'
    raise ValueError(message.format(arn))
100,596
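A minimal sketch of calling get_execution_role, assuming the sagemaker package is installed and the caller's AWS identity is an IAM role (for example, on a SageMaker notebook instance).

import sagemaker

# Returns the role ARN of the current identity, or raises ValueError
# if the identity is a user rather than a role.
role = sagemaker.get_execution_role()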
Stop the Amazon SageMaker hyperparameter tuning job with the specified name. Args: name (str): Name of the Amazon SageMaker hyperparameter tuning job. Raises: ClientError: If an error occurs while trying to stop the hyperparameter tuning job.
def stop_tuning_job(self, name):
    try:
        LOGGER.info('Stopping tuning job: {}'.format(name))
        self.sagemaker_client.stop_hyper_parameter_tuning_job(HyperParameterTuningJobName=name)
    except ClientError as e:
        error_code = e.response['Error']['Code']
        # allow to pass if the job already stopped
        if error_code == 'ValidationException':
            LOGGER.info('Tuning job: {} is already stopped or not running.'.format(name))
        else:
            LOGGER.error('Error occurred while attempting to stop tuning job: {}. Please try again.'.format(name))
            raise
100,615
Create a SageMaker Model Package from the results of training with an Algorithm Package. Args: name (str): ModelPackage name. description (str): Model Package description. algorithm_arn (str): ARN or name of the algorithm used for training. model_data (str): S3 URI of the model artifacts produced by training.
def create_model_package_from_algorithm(self, name, description, algorithm_arn, model_data):
    request = {
        'ModelPackageName': name,
        'ModelPackageDescription': description,
        'SourceAlgorithmSpecification': {
            'SourceAlgorithms': [{'AlgorithmName': algorithm_arn, 'ModelDataUrl': model_data}]
        }
    }
    try:
        LOGGER.info('Creating model package with name: {}'.format(name))
        self.sagemaker_client.create_model_package(**request)
    except ClientError as e:
        error_code = e.response['Error']['Code']
        message = e.response['Error']['Message']
        if error_code == 'ValidationException' and 'ModelPackage already exists' in message:
            LOGGER.warning('Using already existing model package: {}'.format(name))
        else:
            raise
100,619
Wait for an Amazon SageMaker ModelPackage creation to complete. Args: model_package_name (str): Name of the ModelPackage to wait for. poll (int): Polling interval in seconds (default: 5). Returns: dict: Return value from the ``DescribeModelPackage`` API. Raises: ValueError: If the ModelPackage creation fails.
def wait_for_model_package(self, model_package_name, poll=5):
    desc = _wait_until(lambda: _create_model_package_status(self.sagemaker_client, model_package_name), poll)
    status = desc['ModelPackageStatus']

    if status != 'Completed':
        reason = desc.get('FailureReason', None)
        raise ValueError('Error creating model package {}: {} Reason: {}'.format(
            model_package_name, status, reason))
    return desc
100,620
Update an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request. Raises an error if an endpoint with the given endpoint_name does not exist. Args: endpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to update. endpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to deploy. Returns: str: Name of the Amazon SageMaker ``Endpoint`` being updated.
def update_endpoint(self, endpoint_name, endpoint_config_name):
    if not _deployment_entity_exists(lambda: self.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)):
        raise ValueError('Endpoint with name "{}" does not exist; please use an existing endpoint name'
                         .format(endpoint_name))

    self.sagemaker_client.update_endpoint(EndpointName=endpoint_name,
                                          EndpointConfigName=endpoint_config_name)
    return endpoint_name
100,623
Delete an Amazon SageMaker ``Endpoint``. Args: endpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to delete.
def delete_endpoint(self, endpoint_name):
    LOGGER.info('Deleting endpoint with name: {}'.format(endpoint_name))
    self.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
100,624
Delete an Amazon SageMaker endpoint configuration. Args: endpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to delete.
def delete_endpoint_config(self, endpoint_config_name):
    LOGGER.info('Deleting endpoint configuration with name: {}'.format(endpoint_config_name))
    self.sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
100,625
Delete an Amazon SageMaker Model. Args: model_name (str): Name of the Amazon SageMaker model to delete.
def delete_model(self, model_name):
    LOGGER.info('Deleting model with name: {}'.format(model_name))
    self.sagemaker_client.delete_model(ModelName=model_name)
100,626
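The three delete helpers above are commonly chained to tear down a deployment. A sketch assuming an existing Session instance; the resource names are hypothetical.

sess = sagemaker.Session()

# Delete the endpoint first, then the configuration and model behind it.
sess.delete_endpoint('my-endpoint')
sess.delete_endpoint_config('my-endpoint-config')
sess.delete_model('my-model')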
Wait for an Amazon SageMaker training job to complete. Args: job (str): Name of the training job to wait for. poll (int): Polling interval in seconds (default: 5). Returns: (dict): Return value from the ``DescribeTrainingJob`` API. Raises: ValueError: If the training job fails.
def wait_for_job(self, job, poll=5):
    desc = _wait_until_training_done(
        lambda last_desc: _train_done(self.sagemaker_client, job, last_desc), None, poll)
    self._check_job_status(job, desc, 'TrainingJobStatus')
    return desc
100,627
Wait for an Amazon SageMaker Neo compilation job to complete. Args: job (str): Name of the compilation job to wait for. poll (int): Polling interval in seconds (default: 5). Returns: (dict): Return value from the ``DescribeCompilationJob`` API. Raises: ValueError: If the compilation job fails.
def wait_for_compilation_job(self, job, poll=5):
    desc = _wait_until(lambda: _compilation_job_status(self.sagemaker_client, job), poll)
    self._check_job_status(job, desc, 'CompilationJobStatus')
    return desc
100,628
Wait for an Amazon SageMaker hyperparameter tuning job to complete. Args: job (str): Name of the tuning job to wait for. poll (int): Polling interval in seconds (default: 5). Returns: (dict): Return value from the ``DescribeHyperParameterTuningJob`` API. Raises: ValueError: If the hyperparameter tuning job fails.
def wait_for_tuning_job(self, job, poll=5):
    desc = _wait_until(lambda: _tuning_job_status(self.sagemaker_client, job), poll)
    self._check_job_status(job, desc, 'HyperParameterTuningJobStatus')
    return desc
100,629
Wait for an Amazon SageMaker transform job to complete. Args: job (str): Name of the transform job to wait for. poll (int): Polling interval in seconds (default: 5). Returns: (dict): Return value from the ``DescribeTransformJob`` API. Raises: ValueError: If the transform job fails.
def wait_for_transform_job(self, job, poll=5):
    desc = _wait_until(lambda: _transform_job_status(self.sagemaker_client, job), poll)
    self._check_job_status(job, desc, 'TransformJobStatus')
    return desc
100,630
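A hedged example of blocking on a training job with wait_for_job; the job name is hypothetical and the Session instance is assumed to exist already.

# Poll every 30 seconds until the training job reaches a terminal state;
# raises ValueError if the job failed.
desc = sess.wait_for_job('my-training-job', poll=30)
print(desc['TrainingJobStatus'])  # 'Completed' on success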
Check to see if the job completed successfully and, if not, construct and raise a ValueError. Args: job (str): The name of the job to check. desc (dict[str, str]): The result of ``describe_training_job()``. status_key_name (str): Status key name to check for. Raises: ValueError: If the training job fails.
def _check_job_status(self, job, desc, status_key_name):
    status = desc[status_key_name]
    # If the status is in all upper case, convert it to CamelCase.
    status = _STATUS_CODE_TABLE.get(status, status)

    if status != 'Completed' and status != 'Stopped':
        reason = desc.get('FailureReason', '(No reason provided)')
        job_type = status_key_name.replace('JobStatus', ' job')
        raise ValueError('Error for {} {}: {} Reason: {}'.format(job_type, job, status, reason))
100,631
Wait for an Amazon SageMaker endpoint deployment to complete. Args: endpoint (str): Name of the ``Endpoint`` to wait for. poll (int): Polling interval in seconds (default: 5). Returns: dict: Return value from the ``DescribeEndpoint`` API.
def wait_for_endpoint(self, endpoint, poll=5):
    desc = _wait_until(lambda: _deploy_done(self.sagemaker_client, endpoint), poll)
    status = desc['EndpointStatus']

    if status != 'InService':
        reason = desc.get('FailureReason', None)
        raise ValueError('Error hosting endpoint {}: {} Reason: {}'.format(endpoint, status, reason))
    return desc
100,632
Expand an IAM role name into an ARN. If the role is already in the form of an ARN, then the role is simply returned. Otherwise we retrieve the full ARN and return it. Args: role (str): An AWS IAM role (either name or full ARN). Returns: str: The corresponding AWS IAM role ARN.
def expand_role(self, role):
    if '/' in role:
        return role
    else:
        return self.boto_session.resource('iam').Role(role).arn
100,636
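A short sketch of expand_role; the role name and account id below are made up for illustration.

sess.expand_role('MySageMakerRole')
# -> 'arn:aws:iam::123456789012:role/MySageMakerRole'  (looked up via IAM)

sess.expand_role('arn:aws:iam::123456789012:role/MySageMakerRole')
# -> returned unchanged, since it already contains '/'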
Create a definition of a model which can be part of an Inference Pipeline Args: model_data (str): The S3 location of a SageMaker model data ``.tar.gz`` file. image (str): A Docker image URI. env (dict[str, str]): Environment variables to run with ``image`` when hosted in SageMaker (default: None).
def __init__(self, model_data, image, env=None):
    self.model_data = model_data
    self.image = image
    self.env = env
100,640
Return a :class:`~sagemaker.amazon.KNNModel` referencing the latest s3 model data produced by this Estimator. Args: vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model. Default: use subnets and security groups from this Estimator. * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids.
def create_model(self, vpc_config_override=VPC_CONFIG_DEFAULT):
    return KNNModel(self.model_data, self.role, sagemaker_session=self.sagemaker_session,
                    vpc_config=self.get_vpc_config(vpc_config_override))
100,644
Move source to destination. Can handle uploading to S3. Args: source (str): root directory to move. destination (str): file:// or s3:// URI that source will be moved to. job_name (str): SageMaker job name. sagemaker_session (sagemaker.Session): a SageMaker session to interact with S3 if needed. Returns: (str): destination URI.
def move_to_destination(source, destination, job_name, sagemaker_session):
    parsed_uri = urlparse(destination)
    if parsed_uri.scheme == 'file':
        recursive_copy(source, parsed_uri.path)
        final_uri = destination
    elif parsed_uri.scheme == 's3':
        bucket = parsed_uri.netloc
        path = "%s%s" % (parsed_uri.path.lstrip('/'), job_name)
        final_uri = 's3://%s/%s' % (bucket, path)
        sagemaker_session.upload_data(source, bucket, path)
    else:
        raise ValueError('Invalid destination URI, must be s3:// or file://, got: %s' % destination)

    shutil.rmtree(source)
    return final_uri
100,649
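A sketch of moving a local output directory to S3 with move_to_destination; the bucket, prefix, and job name are hypothetical.

uri = move_to_destination('/tmp/output', 's3://my-bucket/prefix/', 'my-job', sess)
# Uploads /tmp/output to s3://my-bucket/prefix/my-job, deletes the local copy,
# and returns 's3://my-bucket/prefix/my-job'.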
A wrapper around distutils.dir_util.copy_tree but won't throw any exception when the source directory does not exist. Args: source (str): source path destination (str): destination path
def recursive_copy(source, destination):
    if os.path.isdir(source):
        copy_tree(source, destination)
100,650
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded. Returns: dictionary: The transformed init_params
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = super(MXNet, cls)._prepare_init_params_from_job_description(job_details, model_channel_name) image_name = init_params.pop('image') framework, py_version, tag, _ = framework_name_from_image(image_name) if not framework: # If we were unable to parse the framework name from the image it is not one of our # officially supported images, in this case just add the image to the init params. init_params['image_name'] = image_name return init_params init_params['py_version'] = py_version # We switched image tagging scheme from regular image version (e.g. '1.0') to more expressive # containing framework version, device type and python version (e.g. '0.12-gpu-py2'). # For backward compatibility map deprecated image tag '1.0' to a '0.12' framework version # otherwise extract framework version from the tag itself. init_params['framework_version'] = '0.12' if tag == '1.0' else framework_version_from_tag(tag) training_job_name = init_params['base_job_name'] if framework != cls.__framework_name__: raise ValueError("Training job: {} didn't use image for requested framework".format(training_job_name)) return init_params
100,671
Convert the transform job description to init params that can be handled by the class constructor Args: job_details (dict): the returned job details from a describe_transform_job API call. Returns: dict: The transformed init_params
def _prepare_init_params_from_job_description(cls, job_details): init_params = dict() init_params['model_name'] = job_details['ModelName'] init_params['instance_count'] = job_details['TransformResources']['InstanceCount'] init_params['instance_type'] = job_details['TransformResources']['InstanceType'] init_params['volume_kms_key'] = job_details['TransformResources'].get('VolumeKmsKeyId') init_params['strategy'] = job_details.get('BatchStrategy') init_params['assemble_with'] = job_details['TransformOutput'].get('AssembleWith') init_params['output_path'] = job_details['TransformOutput']['S3OutputPath'] init_params['output_kms_key'] = job_details['TransformOutput'].get('KmsKeyId') init_params['accept'] = job_details['TransformOutput'].get('Accept') init_params['max_concurrent_transforms'] = job_details.get('MaxConcurrentTransforms') init_params['max_payload'] = job_details.get('MaxPayloadInMB') init_params['base_transform_job_name'] = job_details['TransformJobName'] return init_params
100,679
Returns the piece size (vocabulary size). Args: model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A scalar representing the vocabulary size.
def piece_size(model_file=None, model_proto=None, name=None):
    return _gen_sentencepiece_processor_op.sentencepiece_get_piece_size(
        model_file=model_file, model_proto=model_proto, name=name)
100,688
Converts piece into vocabulary id. Args: input: An arbitrary tensor of string. model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A tensor of int32 with the same shape as input.
def piece_to_id(input, model_file=None, model_proto=None, name=None):
    return _gen_sentencepiece_processor_op.sentencepiece_piece_to_id(
        input, model_file=model_file, model_proto=model_proto, name=name)
100,689
Converts vocabulary id into piece. Args: input: An arbitrary tensor of int32. model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A tensor of string with the same shape as input.
def id_to_piece(input, model_file=None, model_proto=None, name=None):
    return _gen_sentencepiece_processor_op.sentencepiece_id_to_piece(
        input, model_file=model_file, model_proto=model_proto, name=name)
100,690
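A rough round-trip example for the piece/id ops above, assuming they are exposed through a module alias (here tfs) and that 'test.model' is a trained SentencePiece model; both names are assumptions.

import tensorflow as tf

# Convert pieces to ids and back with the same (hypothetical) model file.
ids = tfs.piece_to_id(['Hello', 'world'], model_file='test.model')
pieces = tfs.id_to_piece(ids, model_file='test.model')

with tf.Session() as sess:
    print(sess.run([ids, pieces]))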
Returns true if input id is unknown piece. Args: input: An arbitrary tensor of int32. model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A tensor of bool with the same shape as input.
def is_unknown(input, model_file=None, model_proto=None, name=None):
    return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(
        input, model_file=model_file, model_proto=model_proto, name=name,
        piece_type=0)
100,691
Returns true if input id is control piece. Args: input: An arbitrary tensor of int32. model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A tensor of bool with the same shape as input.
def is_control(input, model_file=None, model_proto=None, name=None):
    return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(
        input, model_file=model_file, model_proto=model_proto, name=name,
        piece_type=1)
100,692
Returns true if input id is unused piece. Args: input: An arbitrary tensor of int32. model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A tensor of bool with the same shape as input.
def is_unused(input, model_file=None, model_proto=None, name=None):
    return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(
        input, model_file=model_file, model_proto=model_proto, name=name,
        piece_type=2)
100,693
Generates code samples. Args: max_length: int. max literal length. max_nest: int. max nesting level. ops: CodeOp. set of allowable operations. Returns: 1. (str) output value. 2. (str) Code operation.
def generate_code(max_length, max_nest, ops): stack = [] def fetch_one(): # Always use an existing nested value for one of the operands. if stack: return stack.pop() else: # Produce a numeral of max_length-digits. value = random.randint(10 ** (max_length - 1), 10 ** max_length - 1) code = str(value) return value, code def fetch(num_operands): values, codes = zip(*[fetch_one() for _ in six.moves.range(num_operands)]) return values, codes for _ in six.moves.range(max_nest): op = random.choice(ops) values, codes = fetch(op.num_operands) new_value = op.eval(values) new_code = op.get_code(codes) stack.append((new_value, "(" + new_code + ")")) final_value, final_code = stack.pop() final_code = final_code[1:-1] final_code.strip("()") if not op.is_memory: final_value = int(final_value) % 10 ** (max_length+1) return str(final_value), final_code
101,038
Defines tokens. Args: max_value: the maximum numeric range for the token. Returns: list of string tokens in vocabulary.
def get_tokens(max_value):
    vocab = [str(i) for i in range(max_value)]
    vocab = set(vocab)
    vocab.update(CodeOp.LITERALS)
    vocab.update(CodeOp.KEYWORDS)
    vocab |= set("".join(vocab))
    return sorted(vocab)
101,039
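As a quick illustration of get_tokens; a sketch assuming CodeOp is importable from the same module.

# With max_value=10 the vocabulary holds the numerals 0-9, the task literals
# and keywords, plus every individual character they contain, sorted.
tokens = get_tokens(10)
print(len(tokens), tokens[:5])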
Creates a TokenDataSource instance. Args: curriculum_obj: (LTECurriculum) determines sample complexity. batch_size: (int) Batch size to generate. max_len: (int) This is the maximum size of any given sample sequence. ops: (list(CodeOp)). Task operations that inherit from CodeOp(). token_by_char: (bool) Whether to tokenize by char ("detokenized") or by keyword, literals and numbers.
def __init__(self, curriculum_obj, batch_size, max_len, ops, token_by_char): # Create the token and inverse-token dicts and fix the UNK token. self._vocab_dict = collections.defaultdict(lambda: 0) self._vocab_dict[self.UNK] = 0 self._inv_vocab_dict = collections.defaultdict(lambda: self.UNK) self.curriculum_obj = curriculum_obj self._max_seq_length = max_len self._ops = ops self._token_by_char = token_by_char self._batch_size = batch_size # Construct the vocabulary. num_token_digits = 1 if token_by_char else curriculum_obj.max_length token_list = get_tokens(10 ** num_token_digits) self.vocab_size = 1 for token in self.DEFAULT_START_TOKENS + token_list: if token not in self._vocab_dict: self._vocab_dict[token] = self.vocab_size self._inv_vocab_dict[self.vocab_size] = token self.vocab_size += 1
101,050
Produces the list of integer indices corresponding to a token list. Args: char_input: The character string to be tokenized. max_len: Truncation length. by_char: If true each character is a token - otherwise alpha-numeric groupings are tokens. Returns: A padded list of string tokens and the true sequence length. Raises: ValueError: the token sequence is too long.
def tokenize(self, char_input, max_len, by_char=False): if by_char: tokenized_list = [self._vocab_dict[token] for token in char_input] else: tokenized_list = [] compound_token = "" for token in char_input: # Compose alphanumeric inputs into compound tokens. add_number = compound_token.isdigit() and not token.isdigit() add_word = compound_token.isalpha() and not token.isalpha() if add_number or add_word: tokenized_list.append(self._vocab_dict[compound_token]) compound_token = "" # Add token or build compound token. if token.isdigit(): compound_token += token elif token.isalpha(): compound_token += token else: tokenized_list.append(self._vocab_dict[token]) if compound_token: tokenized_list.append(self._vocab_dict[compound_token]) # To ensure uniform batch sequence length pad the sequence. seq_size = len(tokenized_list) if seq_size < max_len: padding = [self._vocab_dict[get_padding()]] * (max_len - seq_size) tokenized_list.extend(padding) elif seq_size > max_len: raise ValueError("Token sequence is too large: {}".format( len(tokenized_list))) return tokenized_list, seq_size
101,052
Returns an operations list based on the specified task index. Args: task_type: indicates the task type used. Returns: List of the eligible ops.
def get_task_ops(task_type=TaskType.ALG_CTRL):
    try:
        return LearnToExecuteState.TASK_TYPE_OPS[task_type]
    except KeyError:
        raise KeyError("Bad task_type '%s', check config." % task_type)
101,054
Creates a TokenDataSource instance. Args: data_file: file object containing text data to be tokenized. vocab_data_file: file object containing text data used to initialize the vocabulary.
def __init__(self, data_file, vocab_data_file): def reading_function(file_name): for root in self.ROOTS: file_path = os.path.join(root, file_name) if os.path.exists(file_path): break file_path = None assert file_path is not None, ("Couldn't locate %s in %r" % (file_name, self.ROOTS)) with open(file_path, mode="rb") as fp: return list(fp.read().decode().replace("\n", self.CHAR_EOS)) self._vocab_dict = {} self._inv_vocab_dict = {} token_list = reading_function(vocab_data_file) self.vocab_size = 0 for token in self.DEFAULT_START_TOKENS + token_list: if token not in self._vocab_dict: self._vocab_dict[token] = self.vocab_size self._inv_vocab_dict[self.vocab_size] = token self.vocab_size += 1 raw_data = reading_function(data_file) self.flat_data = np.array(self.tokenize(raw_data), dtype=np.int32) self.num_tokens = self.flat_data.shape[0]
101,066
Initializes a TinyShakespeare sequence data object. Args: num_steps: sequence_length. batch_size: batch size. subset: 'train', 'valid' or 'test'. random: boolean indicating whether to do random sampling of sequences. Default is false (sequential sampling). dtype: type of generated tensors (both observations and targets). name: object name. Raises: ValueError: if subset is not train, valid or test.
def __init__(self, num_steps=1, batch_size=1, subset="train", random=False, dtype=tf.float32, name="tiny_shakespeare_dataset"): if subset not in [self.TRAIN, self.VALID, self.TEST]: raise ValueError("subset should be %s, %s, or %s. Received %s instead." % (self.TRAIN, self.VALID, self.TEST, subset)) super(TinyShakespeareDataset, self).__init__(name=name) # Generate vocab from train set. self._vocab_file = "ts.train.txt" self._data_file = "ts.{}.txt".format(subset) self._num_steps = num_steps self._batch_size = batch_size self._random_sampling = random self._dtype = dtype self._data_source = TokenDataSource( data_file=self._data_file, vocab_data_file=self._vocab_file) self._vocab_size = self._data_source.vocab_size self._flat_data = self._data_source.flat_data self._n_flat_elements = self._data_source.num_tokens self._num_batches = self._n_flat_elements // (self._num_steps * batch_size) self._reset_head_indices() self._queue_capacity = 10
101,068
Returns cost. Args: logits: model output. target: target. Returns: Cross-entropy loss for a sequence of logits. The loss will be averaged across time steps if time_average_cost was enabled at construction time.
def cost(self, logits, target):
    logits = tf.reshape(logits, [self._num_steps * self._batch_size, -1])
    target = tf.reshape(target, [self._num_steps * self._batch_size, -1])
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=target)
    loss = tf.reduce_sum(xent)
    return loss / self._batch_size
101,073
Gets training and testing dataset iterators. Args: name: String. Name of dataset, either 'mnist' or 'cifar10'. train_batch_size: Integer. Batch size for training. test_batch_size: Integer. Batch size for testing. Returns: Dict containing: train_iterator: A tf.data.Iterator, over training data. test_iterator: A tf.data.Iterator, over test data. num_classes: Integer. Number of class labels.
def get_data(name, train_batch_size, test_batch_size): if name not in ['mnist', 'cifar10']: raise ValueError( 'Expected dataset \'mnist\' or \'cifar10\', but got %s' % name) dataset = getattr(tf.keras.datasets, name) num_classes = 10 # Extract the raw data. raw_data = dataset.load_data() (images_train, labels_train), (images_test, labels_test) = raw_data # Normalize inputs and fix types. images_train = images_train.astype(np.float32) / 255. images_test = images_test.astype(np.float32) / 255. labels_train = labels_train.astype(np.int32).squeeze() labels_test = labels_test.astype(np.int32).squeeze() # Add a dummy 'color channel' dimension if it is not present. if images_train.ndim == 3: images_train = np.expand_dims(images_train, -1) images_test = np.expand_dims(images_test, -1) # Put the data onto the graph as constants. train_data = tf.data.Dataset.from_tensor_slices((images_train, labels_train)) test_data = tf.data.Dataset.from_tensor_slices((images_test, labels_test)) # Create iterators for each dataset. train_iterator = ( train_data # Note: For larger datasets e.g. ImageNet, it will not be feasible to have # a shuffle buffer this large. .shuffle(buffer_size=len(images_train)) .batch(train_batch_size) .repeat() .make_one_shot_iterator() ) test_iterator = test_data.batch(test_batch_size).make_initializable_iterator() return dict( train_iterator=train_iterator, test_iterator=test_iterator, num_classes=num_classes)
101,075
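A minimal TF1-style sketch of consuming the iterators returned by get_data; the batch sizes are arbitrary.

import tensorflow as tf

data = get_data('mnist', train_batch_size=128, test_batch_size=1000)
images, labels = data['train_iterator'].get_next()

with tf.Session() as sess:
    batch_images, batch_labels = sess.run([images, labels])
    print(batch_images.shape, data['num_classes'])  # (128, 28, 28, 1) 10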
Returns the name of the variable scope indicated by the given value. Args: value: String, variable scope, or object with `variable_scope` attribute (e.g., Sonnet module). Returns: The name (a string) of the corresponding variable scope. Raises: ValueError: If `value` does not identify a variable scope.
def get_variable_scope_name(value):
    # If the object has a "variable_scope" property, use it.
    value = getattr(value, "variable_scope", value)
    if isinstance(value, tf.VariableScope):
        return value.name
    elif isinstance(value, six.string_types):
        return value
    else:
        raise ValueError("Not a variable scope: {}".format(value))
101,076
Returns a tuple of `tf.Variable`s in a scope for a given collection. Args: scope: `tf.VariableScope` or string to retrieve variables from. collection: Collection to restrict query to. By default this is `tf.GraphKeys.TRAINABLE_VARIABLES`, which doesn't include non-trainable variables such as moving averages. Returns: A tuple of `tf.Variable` objects.
def get_variables_in_scope(scope, collection=tf.GraphKeys.TRAINABLE_VARIABLES):
    scope_name = get_variable_scope_name(scope)

    if scope_name:
        # Escape the name in case it contains any "." characters. Add a closing
        # slash so we will not search any scopes that have this scope name as a
        # prefix.
        scope_name = re.escape(scope_name) + "/"

    return tuple(tf.get_collection(collection, scope_name))
101,077
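A small sketch of get_variables_in_scope; the scope and variable names are arbitrary.

import tensorflow as tf

with tf.variable_scope('mlp'):
    tf.get_variable('w', shape=[3, 4])
    tf.get_variable('b', shape=[4])

get_variables_in_scope('mlp')
# -> (<tf.Variable 'mlp/w:0' ...>, <tf.Variable 'mlp/b:0' ...>)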
Separates the sliced (partitioned) and unsliced variables in var_list. Args: var_list: a list of variables. Returns: A list of unsliced variables in var_list, and a dict mapping names to parts for the sliced variables in var_list.
def _get_sliced_variables(var_list):
    unsliced_variables = []
    sliced_variables = collections.defaultdict(lambda: [])
    for var in var_list:
        if var._save_slice_info:
            sliced_variables[var._save_slice_info.full_name].append(var)
        else:
            unsliced_variables.append(var)
    return unsliced_variables, sliced_variables
101,085
Yields an iterator over (string, variable) pairs in the variable map. In general, variable maps map variable names to either a `tf.Variable`, or list of `tf.Variable`s (in case of sliced variables). Args: variable_map: dict, variable map over which to iterate. Yields: (string, tf.Variable) pairs.
def variable_map_items(variable_map):
    for key, var_or_vars in six.iteritems(variable_map):
        if isinstance(var_or_vars, (list, tuple)):
            for variable in var_or_vars:
                yield key, variable
        else:
            yield key, var_or_vars
101,090
Returns the device with an annotation specifying `ResourceVariable`. "legacy" means a normal tf.Variable while "resource" means a ResourceVariable. For example: `(legacy)` `(resource)` `/job:learner/task:0/device:CPU:* (legacy)` `/job:learner/task:0/device:CPU:* (resource)` Args: var: The Tensorflow Variable to print.
def _format_device(var):
    if var.dtype.name.endswith("_ref"):
        resource_var_annotation = "(legacy)"
    else:
        resource_var_annotation = "(resource)"

    if var.device:
        return "{} {}".format(var.device, resource_var_annotation)
    else:
        return resource_var_annotation
101,092
Logs variable information. This function logs the name, shape, type, collections, and device for either all variables or a given iterable of variables. In the "Device" columns, the nature of the variable (legacy or resource (for ResourceVariables)) is also specified in parenthesis. Args: variables: iterable of variables; if not provided, then all variables (in the default graph) are logged.
def log_variables(variables=None):
    if variables is None:
        variables = tf.global_variables() + tf.local_variables()
    for row in format_variables(variables, join_lines=False):
        tf.logging.info(row)
101,095
Returns a dict mapping dtypes to number of variables and scalars. Args: variables: iterable of `tf.Variable`s, or None. If None is passed, then all global and local variables in the current graph are used. Returns: A dict mapping tf.dtype keys to a dict containing the keys 'num_scalars' and 'num_variables'.
def count_variables_by_type(variables=None): if variables is None: variables = tf.global_variables() + tf.local_variables() unique_types = set(v.dtype.base_dtype for v in variables) results_dict = {} for dtype in unique_types: if dtype == tf.string: tf.logging.warning( "NB: string Variables present. The memory usage for these Variables " "will not be accurately computed as it depends on the exact strings " "stored in a particular session.") vars_of_type = [v for v in variables if v.dtype.base_dtype == dtype] num_scalars = sum(v.shape.num_elements() for v in vars_of_type) results_dict[dtype] = { "num_variables": len(vars_of_type), "num_scalars": num_scalars } return results_dict
101,098
Split the first dimension of a tensor. Args: tensor: Tensor to have its first dimension split. inputs: Original reference input to look up the dimensions of. n_dims: Number of dimensions to split. Returns: The input tensor, with its first dimension split.
def split_leading_dim(tensor, inputs, n_dims=2): input_shape_static = inputs.get_shape() input_shape_list = input_shape_static.as_list() tensor_shape_static = tensor.get_shape() tensor_shape_list = tensor_shape_static.as_list() if (input_shape_static.is_fully_defined() and tensor_shape_static.is_fully_defined()): new_shape = input_shape_list[:n_dims] + tensor_shape_list[1:] return tf.reshape(tensor, new_shape) # Shape can't be inferred statically. dims_after_first = tf.shape(tensor)[1:] split_sizes = tf.shape(inputs)[:n_dims] known_split_sizes = input_shape_list[:n_dims] known_dims_after_first = tensor_shape_list[1:] output_size = tf.concat([split_sizes, dims_after_first], 0) result = tf.reshape(tensor, output_size) result.set_shape(known_split_sizes + known_dims_after_first) return result
101,108
Returns a cloned `Linear` module. Args: name: Optional string assigning name of cloned module. The default name is constructed by appending "_clone" to `self.module_name`. Returns: Cloned `Linear` module.
def clone(self, name=None):
    if name is None:
        name = self.module_name + "_clone"
    return Linear(output_size=self.output_size,
                  use_bias=self._use_bias,
                  initializers=self._initializers,
                  partitioners=self._partitioners,
                  regularizers=self._regularizers,
                  name=name)
101,114
Returns transposed `AddBias` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.module_name`. Returns: Transposed `AddBias` module.
def transpose(self, name=None):
    if name is None:
        name = self.module_name + "_transpose"
    return AddBias(output_shape=lambda: self._input_shape,
                   bias_dims=self._bias_dims,
                   initializers=self._initializers,
                   regularizers=self._regularizers,
                   name=name)
101,119
Replaces the -1 wildcard in the output shape vector. This function infers the correct output shape given the input dimensions. Args: dimensions: List of input non-batch dimensions. Returns: Tuple of non-batch output dimensions.
def _infer_shape(self, dimensions):
    # Size of input
    n = np.prod(dimensions)
    # Size of output where defined
    m = np.prod(abs(np.array(self._shape)))
    # Replace wildcard
    v = np.array(self._shape)
    v[v == -1] = n // m
    return tuple(v)
101,121
Connects the SliceByDim module into the graph. Args: inputs: `Tensor` to slice. Its rank must be greater than the maximum dimension specified in `dims` (plus one as python is 0 indexed). Returns: The sliced tensor. Raises: ValueError: If `inputs` tensor has insufficient rank.
def _build(self, inputs):
    shape_inputs = inputs.get_shape().as_list()
    rank = len(shape_inputs)

    # Check that the tensor has sufficient rank.
    max_dim = np.max(self._dims) + 1
    if rank < max_dim:
        raise ValueError("Rank of inputs must be at least {}.".format(max_dim))

    # Build default lists for begin and size to pass to `tf.slice`.
    full_begin = [0] * rank
    full_size = [-1] * rank

    # Update the lists with what the user provided.
    for dim, begin, size in zip(self._dims, self._begin, self._size):
        full_begin[dim] = begin
        full_size[dim] = size

    return tf.slice(inputs, begin=full_begin, size=full_size)
101,131
Constructs the `TileByDim` module. Args: dims: The dimensions to tile along, as a list of unique integers. multiples: The multiple of the tiling, as a list of integers. Must be the same length as the `dims` list. name: The name of the module. Raises: ValueError: If `dims` has non-unique integers, or if the size of `multiples` is different from the size of `dims`.
def __init__(self, dims, multiples, name="tile_by_dim"):
    super(TileByDim, self).__init__(name=name)
    self._dims = dims
    self._multiples = multiples

    if np.unique(dims).size != len(dims):
        raise ValueError("dims must not have any repeated integers.")

    if len(multiples) != len(dims):
        raise ValueError(
            "multiples must have the same length as dims: {}.".format(len(dims)))
101,132
Connects the `TileByDim` module into the graph. Args: inputs: `Tensor` to tile. Returns: The tiled tensor.
def _build(self, inputs):
    shape_inputs = inputs.get_shape().as_list()
    rank = len(shape_inputs)

    # Builds default lists for multiples to pass to `tf.tile`.
    full_multiples = [1] * rank

    # Updates lists with what the user provided.
    for dim, multiple in zip(self._dims, self._multiples):
        full_multiples[dim] = multiple

    return tf.tile(inputs, multiples=full_multiples)
101,133
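A hedged usage sketch for the tiling module above, assuming it is available as snt.TileByDim (as in Sonnet).

import numpy as np
import sonnet as snt
import tensorflow as tf

x = tf.constant(np.arange(6).reshape([2, 3]))
tiled = snt.TileByDim(dims=[1], multiples=[2])(x)
# tiled has shape [2, 6]: each row of x is repeated twice along dimension 1.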
Constructs the MergeDims module. Args: start: Start of the range of dimensions to merge. size: Size of the range of dimensions to merge. name: The name of the module. Raises: ValueError: If `size` is not strictly greater than 1.
def __init__(self, start, size, name="merge_dims"):
    super(MergeDims, self).__init__(name=name)
    self._start = start
    self._size = size

    # Merging fewer than two dimensions would be a no-op, so reject it.
    if size <= 1:
        raise ValueError("`size` should be strictly greater than 1.")
101,134
Connects the MergeDims module into the graph. Args: inputs: Tensor or a nested list of Tensors to merge. Its rank must be greater than or equal to `start` + `size`. Returns: The merged Tensor or a nested list of merged Tensors. Raises: ValueError: If any of the `inputs` tensors has insufficient rank.
def _build(self, inputs):
    if nest.is_sequence(inputs):
        merged_tensors = [self._merge(tensor) for tensor in nest.flatten(inputs)]
        return nest.pack_sequence_as(inputs, merged_tensors)

    # inputs is a single tf.Tensor
    return self._merge(inputs)
101,136
Module constructor. Args: idx: Indexes of the tensors to select. If `idx` is an integer, then a `Tensor` is returned. If `idx` is a (nested) list/tuple, then a (nested) tuple of `Tensor` is returned. name: Name of the module. Raises: TypeError: If `idx` is not an list, tuple or integer.
def __init__(self, idx, name="select_input"):
    super(SelectInput, self).__init__(name=name)
    self._check_type(idx)
    self._idx = idx
101,137
Perform multi-head attention from 'Attention is All You Need'. Implementation of the attention mechanism from https://arxiv.org/abs/1706.03762. Args: memory: Memory tensor to perform attention on. Returns: new_memory: New memory tensor.
def _multihead_attention(self, memory): key_size = self._key_size value_size = self._head_size qkv_size = 2 * key_size + value_size total_size = qkv_size * self._num_heads # Denote as F. qkv = basic.BatchApply(basic.Linear(total_size))(memory) qkv = basic.BatchApply(layer_norm.LayerNorm())(qkv) mem_slots = memory.get_shape().as_list()[1] # Denoted as N. # [B, N, F] -> [B, N, H, F/H] qkv_reshape = basic.BatchReshape([mem_slots, self._num_heads, qkv_size])(qkv) # [B, N, H, F/H] -> [B, H, N, F/H] qkv_transpose = tf.transpose(qkv_reshape, [0, 2, 1, 3]) q, k, v = tf.split(qkv_transpose, [key_size, key_size, value_size], -1) q *= key_size ** -0.5 dot_product = tf.matmul(q, k, transpose_b=True) # [B, H, N, N] weights = tf.nn.softmax(dot_product) output = tf.matmul(weights, v) # [B, H, N, V] # [B, H, N, V] -> [B, N, H, V] output_transpose = tf.transpose(output, [0, 2, 1, 3]) # [B, N, H, V] -> [B, N, H * V] new_memory = basic.BatchFlatten(preserve_dims=2)(output_transpose) return new_memory
101,142
Create input and forget gates for this step using `inputs` and `memory`. Args: inputs: Tensor input. memory: The current state of memory. Returns: input_gate: A LSTM-like insert gate. forget_gate: A LSTM-like forget gate.
def _create_gates(self, inputs, memory): # We'll create the input and forget gates at once. Hence, calculate double # the gate size. num_gates = 2 * self._calculate_gate_size() memory = tf.tanh(memory) inputs = basic.BatchFlatten()(inputs) gate_inputs = basic.BatchApply(basic.Linear(num_gates), n_dims=1)(inputs) gate_inputs = tf.expand_dims(gate_inputs, axis=1) gate_memory = basic.BatchApply(basic.Linear(num_gates))(memory) gates = tf.split(gate_memory + gate_inputs, num_or_size_splits=2, axis=2) input_gate, forget_gate = gates input_gate = tf.sigmoid(input_gate + self._input_bias) forget_gate = tf.sigmoid(forget_gate + self._forget_bias) return input_gate, forget_gate
101,143
Perform multiheaded attention over `memory`. Args: memory: Current relational memory. Returns: The attended-over memory.
def _attend_over_memory(self, memory): attention_mlp = basic.BatchApply( mlp.MLP([self._mem_size] * self._attention_mlp_layers)) for _ in range(self._num_blocks): attended_memory = self._multihead_attention(memory) # Add a skip connection to the multiheaded attention's input. memory = basic.BatchApply(layer_norm.LayerNorm())( memory + attended_memory) # Add a skip connection to the attention_mlp's input. memory = basic.BatchApply(layer_norm.LayerNorm())( attention_mlp(memory) + memory) return memory
101,144
Adds relational memory to the TensorFlow graph. Args: inputs: Tensor input. memory: Memory output from the previous time step. treat_input_as_matrix: Optional, whether to treat `input` as a sequence of matrices. Defaults to False, in which case the input is flattened into a vector. Returns: output: This time step's output. next_memory: The next version of memory to use.
def _build(self, inputs, memory, treat_input_as_matrix=False): if treat_input_as_matrix: inputs = basic.BatchFlatten(preserve_dims=2)(inputs) inputs_reshape = basic.BatchApply( basic.Linear(self._mem_size), n_dims=2)(inputs) else: inputs = basic.BatchFlatten()(inputs) inputs = basic.Linear(self._mem_size)(inputs) inputs_reshape = tf.expand_dims(inputs, 1) memory_plus_input = tf.concat([memory, inputs_reshape], axis=1) next_memory = self._attend_over_memory(memory_plus_input) n = inputs_reshape.get_shape().as_list()[1] next_memory = next_memory[:, :-n, :] if self._gate_style == 'unit' or self._gate_style == 'memory': self._input_gate, self._forget_gate = self._create_gates( inputs_reshape, memory) next_memory = self._input_gate * tf.tanh(next_memory) next_memory += self._forget_gate * memory output = basic.BatchFlatten()(next_memory) return output, next_memory
101,145
Returns a decorator to copy documentation from the given function. Docstring is copied, including *args and **kwargs documentation. Args: fn_with_doc_to_copy: Function whose docstring, including *args and **kwargs documentation, is to be copied. Returns: Decorated version of `wrapper_init` with documentation copied from `fn_with_doc_to_copy`.
def with_doc(fn_with_doc_to_copy):
    def decorator(wrapper_init):
        # Wrap the target class's constructor (to assume its docstring),
        # but invoke the wrapper class's constructor.
        @wrapt.decorator
        def wrapping_fn(unused_wrapped, instance, args, kwargs):
            wrapper_init(instance, *args, **kwargs)
        return wrapping_fn(fn_with_doc_to_copy)  # pylint: disable=no-value-for-parameter
    return decorator
101,150
Constructs the cell, within this module's variable scope. Args: cell_ctor: Callable that instantiates a `tf.contrib.rnn.RNNCell`. *args: Arguments to pass to `cell_ctor`. **kwargs: Keyword arguments to pass to `cell_ctor`. If `name` is provided, it is passed to `RNNCore.__init__` as well. If `custom_getter` is provided, it is passed to `RNNCore.__init__` but not to `cell_ctor`.
def __init__(self, cell_ctor, *args, **kwargs):
    super(RNNCellWrapper, self).__init__(
        name=kwargs.get("name"), custom_getter=kwargs.pop("custom_getter", None))
    with self._enter_variable_scope():
        self._cell = cell_ctor(*args, **kwargs)
101,153
Connects the LayerNorm module into the graph. Args: inputs: a Tensor of dimensionality >= 2. Returns: normalized: layer normalized outputs with same shape as inputs. Raises: base.NotSupportedError: If `inputs` has less than 2 dimensions.
def _build(self, inputs): if self._axis is None: axis = list(range(1, inputs.shape.ndims)) else: axis = self._axis original_dtype = inputs.dtype if original_dtype in [tf.float16, tf.bfloat16]: inputs = tf.cast(inputs, tf.float32) if inputs.get_shape().ndims < 2: raise base.NotSupportedError( "Layer normalization expects inputs of at least rank 2." " Got inputs of rank {}.".format(inputs.get_shape().ndims)) # Shape for the learnable scale and offset is the number of channels. See # https://arxiv.org/pdf/1803.08494.pdf around equation 6. params_shape = inputs.get_shape()[-1:] if self._scale: if self.GAMMA not in self._initializers: self._initializers[self.GAMMA] = create_gamma_initializer() self._gamma = tf.get_variable( self.GAMMA, shape=params_shape, dtype=inputs.dtype, initializer=self._initializers[self.GAMMA], partitioner=self._partitioners.get(self.GAMMA), regularizer=self._regularizers.get(self.GAMMA)) else: self._gamma = None if self._offset: if self.BETA not in self._initializers: self._initializers[self.BETA] = create_beta_initializer() self._beta = tf.get_variable( self.BETA, shape=params_shape, dtype=inputs.dtype, initializer=self._initializers[self.BETA], partitioner=self._partitioners.get(self.BETA), regularizer=self._regularizers.get(self.BETA)) else: self._beta = None mean, var = tf.nn.moments(inputs, axis, keep_dims=True) normalized = tf.nn.batch_normalization(inputs, mean, var, self._beta, self._gamma, self._eps) if original_dtype in [tf.float16, tf.bfloat16]: normalized = tf.cast(normalized, dtype=original_dtype) return normalized
101,155
Initialize AttentiveRead module. Args: attention_logit_mod: Module that produces logit corresponding to a memory slot's compatibility. Must map a [batch_size * memory_size, memory_word_size + query_word_size]-shaped Tensor to a [batch_size * memory_size, 1] shape Tensor. name: string. Name for module.
def __init__(self, attention_logit_mod, name="attention"):
    super(AttentiveRead, self).__init__(name=name)
    self._attention_logit_mod = attention_logit_mod
101,156
Builds the statistics part of the graph when using moving variance. Args: input_batch: Input batch Tensor. use_batch_stats: Boolean to indicate if batch statistics should be calculated, otherwise moving averages are returned. stat_dtype: TensorFlow datatype to use for the moving mean and variance. Returns: Tuple of (mean, variance), each of the same datatype as `input_batch`.
def _build_statistics(self, input_batch, use_batch_stats, stat_dtype): # Set up our moving statistics. When connecting in parallel, this is shared. if self.MOVING_MEAN not in self._initializers: self._initializers[self.MOVING_MEAN] = create_mean_initializer() self._moving_mean = tf.get_variable( "moving_mean", dtype=stat_dtype, shape=(self._num_channels,), collections=[ tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES, ], initializer=self._initializers[self.MOVING_MEAN], trainable=False) if self.MOVING_VARIANCE not in self._initializers: self._initializers[self.MOVING_VARIANCE] = create_variance_initializer() self._moving_variance = tf.get_variable( "moving_variance", dtype=stat_dtype, shape=(self._num_channels,), collections=[ tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES, ], initializer=self._initializers[self.MOVING_VARIANCE], trainable=False) def build_batch_stats(): mean, variance = tf.nn.moments(input_batch, self._axis, keep_dims=True, name="normalize_moments") return mean, variance def build_moving_stats(): # If necessary, cast the moving statistics to match the input type. # This is required by tf.nn.batch_normalization. input_dtype = input_batch.dtype.base_dtype if stat_dtype == input_dtype: return ( tf.identity(self._moving_mean), tf.identity(self._moving_variance), ) else: return ( tf.cast(self._moving_mean, input_dtype), tf.cast(self._moving_variance, input_dtype), ) mean, variance = utils.smart_cond( use_batch_stats, build_batch_stats, build_moving_stats, ) return mean, variance
101,159
Builds the moving average update ops when using moving variance. Args: mean: The mean value to update with. variance: The variance value to update with. is_training: Boolean Tensor to indicate if we're currently in training mode. Returns: Tuple of `(update_mean_op, update_variance_op)` when `is_training` is or could be `True`. Returns `None` when `is_training=False`.
def _build_update_ops(self, mean, variance, is_training): def build_update_ops(): update_mean_op = moving_averages.assign_moving_average( variable=self._moving_mean, value=tf.reshape(mean, (self._num_channels,)), decay=self._decay_rate, zero_debias=False, name="update_moving_mean").op update_variance_op = moving_averages.assign_moving_average( variable=self._moving_variance, value=tf.reshape(variance, (self._num_channels,)), decay=self._decay_rate, zero_debias=False, name="update_moving_variance").op return update_mean_op, update_variance_op def build_no_ops(): return (tf.no_op(), tf.no_op()) # Only make the ops if we know that `is_training=True`, or the value of # `is_training` is unknown. is_training_const = utils.constant_value(is_training) if is_training_const is None or is_training_const: update_mean_op, update_variance_op = utils.smart_cond( is_training, build_update_ops, build_no_ops, ) return (update_mean_op, update_variance_op) else: return None
101,160
Create a dilated convolution layer. Args: output_channels: int. Number of output channels for each pixel. dilation_rate: int. Represents how many pixels each stride offset will move. A value of 1 indicates a standard convolution. apply_relu: bool. If True, a ReLU non-linearity is added. name: string. Name for layer. Returns: a sonnet Module for a dilated convolution.
def _dilated_conv_layer(self, output_channels, dilation_rate, apply_relu, name): layer_components = [ conv.Conv2D( output_channels, [3, 3], initializers=self._initializers, regularizers=self._regularizers, rate=dilation_rate, name="dilated_conv_" + name), ] if apply_relu: layer_components.append(lambda net: tf.nn.relu(net, name="relu_" + name)) return sequential.Sequential(layer_components, name=name)
101,172
Constructs a new `BlockDiagonalMatrix` module. Args: block_shape: tuple, 2-dimensional tuple indicating the shape of each individual block. block_rows: int, the number of blocks in each row (and column) of the output matrix. name: string, name of the module.
def __init__(self, block_shape, block_rows, name='block_diagonal_matrix'):
    super(BlockDiagonalMatrix, self).__init__(
        block_shape=block_shape,
        block_rows=block_rows,
        include_diagonal=True,
        include_off_diagonal=False,
        name=name)
101,194
Initializes a contextual switch for a custom getter. Args: getter: The custom getter which we may want to switch on. verbose: Log out every time a variable is fetched, and whether or not `getter` is used. Returns: A custom getter which can also be used as a context manager. Entering the context enables the custom getter.
def __init__(self, getter, verbose=False):
    self._count = 0
    self._getter = getter
    self._verbose = verbose
101,195
Dynamic unroll across input objects. Args: inputs: tensor (batch x num_objects x feature). Objects to sort. Returns: Tensor (batch x num_objects); logits indicating the reference objects.
def _build(self, inputs):
    batch_size = inputs.get_shape()[0]
    output_sequence, _ = tf.nn.dynamic_rnn(
        cell=self._core,
        inputs=inputs,
        time_major=False,
        initial_state=self._core.initial_state(batch_size, trainable=False))
    outputs = snt.BatchFlatten()(output_sequence[:, -1, :])
    outputs = self._final_mlp(outputs)
    logits = snt.Linear(self._target_size)(outputs)
    return logits
101,199
This is the core model logic. Unrolls a Bayesian RNN over the given sequence. Args: data_ops: A `sequence_data.SequenceDataOps` namedtuple. embed_layer: A `snt.Embed` instance. rnn_core: A `snt.RNNCore` instance. output_linear: A `snt.Linear` instance. name_prefix: A string to use to prefix local variable names. Returns: A 3D time-major tensor representing the model's logits for a sequence of predictions. Shape `[time_steps, batch_size, vocab_size]`.
def build_logits(data_ops, embed_layer, rnn_core, output_linear, name_prefix): # Embed the input index sequence. embedded_input_seq = snt.BatchApply( embed_layer, name="input_embed_seq")(data_ops.sparse_obs) # Construct variables for holding the RNN state. initial_rnn_state = nest.map_structure( lambda t: tf.get_local_variable( # pylint: disable long lambda warning "{}/rnn_state/{}".format(name_prefix, t.op.name), initializer=t), rnn_core.initial_state(FLAGS.batch_size)) assign_zero_rnn_state = nest.map_structure( lambda x: x.assign(tf.zeros_like(x)), initial_rnn_state) assign_zero_rnn_state = tf.group(*nest.flatten(assign_zero_rnn_state)) # Unroll the RNN core over the sequence. rnn_output_seq, rnn_final_state = tf.nn.dynamic_rnn( cell=rnn_core, inputs=embedded_input_seq, initial_state=initial_rnn_state, time_major=True) # Persist the RNN state for the next unroll. update_rnn_state = nest.map_structure( tf.assign, initial_rnn_state, rnn_final_state) with tf.control_dependencies(nest.flatten(update_rnn_state)): rnn_output_seq = tf.identity(rnn_output_seq, name="rnn_output_seq") output_logits = snt.BatchApply( output_linear, name="output_embed_seq")(rnn_output_seq) return output_logits, assign_zero_rnn_state
101,219
Construct a SkipConnectionCore. Args: base_core: Base RNNCore to wrap. input_shape: Shape of the input as tuple, excluding the batch size. name: Name of the module.
def __init__(self, base_core, input_shape=None, name="skip_connection_core"):
    super(SkipConnectionCore, self).__init__(name=name)
    self._base_core = base_core
    self._input_shape = input_shape
101,238
LSTM with recurrent dropout. Args: hidden_size: the LSTM hidden size. keep_prob: the probability to keep an entry when applying dropout. **kwargs: Extra keyword arguments to pass to the LSTM. Returns: A tuple (train_lstm, test_lstm) where train_lstm is an LSTM with recurrent dropout enabled to be used for training and test_lstm is the same LSTM without recurrent dropout.
def lstm_with_recurrent_dropout(hidden_size, keep_prob=0.5, **kwargs):
    lstm = LSTM(hidden_size, **kwargs)
    return RecurrentDropoutWrapper(lstm, LSTMState(keep_prob, None)), lstm
101,245
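A hedged sketch of wiring the train/test LSTM pair into unrolls. The `train_inputs` and `test_inputs` tensors are assumptions, and the two cores share weights because the wrapper reuses the same underlying LSTM module.

import tensorflow as tf

train_lstm, test_lstm = lstm_with_recurrent_dropout(128, keep_prob=0.8)

# train_inputs / test_inputs: assumed [time, batch, features] tensors.
train_out, _ = tf.nn.dynamic_rnn(train_lstm, train_inputs,
                                 time_major=True, dtype=tf.float32)
test_out, _ = tf.nn.dynamic_rnn(test_lstm, test_inputs,
                                time_major=True, dtype=tf.float32)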
Highway core with recurrent dropout. Args: hidden_size: (int) Hidden size dimensionality. num_layers: (int) Number of highway layers. keep_prob: the probability to keep an entry when applying dropout. **kwargs: Extra keyword arguments to pass to the highway core. Returns: A tuple (train_core, test_core) where train_core is a highway core with recurrent dropout enabled to be used for training and test_core is the same highway core without recurrent dropout.
def highway_core_with_recurrent_dropout(hidden_size, num_layers,
                                        keep_prob=0.5, **kwargs):
    core = HighwayCore(hidden_size, num_layers, **kwargs)
    return RecurrentDropoutWrapper(core, keep_prob), core
101,247
Builds a new wrapper around a given core. Args: core: the RNN core to be wrapped. keep_probs: the recurrent dropout keep probabilities to apply. This should have the same structure as core.init_state. No dropout is applied for leafs set to None.
def __init__(self, core, keep_probs): super(RecurrentDropoutWrapper, self).__init__( custom_getter=None, name=core.module_name + "_recdropout") self._core = core self._keep_probs = keep_probs # self._dropout_state_size is a list of shape for the state parts to which # dropout is to be applied. # self._dropout_index has the same shape as the core state. Leafs contain # either None if no dropout is applied or an integer representing an index # in self._dropout_state_size. self._dropout_state_size = [] def set_dropout_state_size(keep_prob, state_size): if keep_prob is not None: self._dropout_state_size.append(state_size) return len(self._dropout_state_size) - 1 return None self._dropout_indexes = tf.contrib.framework.nest.map_structure( set_dropout_state_size, keep_probs, core.state_size)
101,254
Returns new convolution. Args: use_bias: Use bias in convolutions. If False, clean_dict removes bias entries from initializers, partitioners and regularizers passed to the constructor of the convolution.
def _new_convolution(self, use_bias): def clean_dict(input_dict): if input_dict and not use_bias: cleaned_dict = input_dict.copy() cleaned_dict.pop("b", None) return cleaned_dict return input_dict return self._conv_class( output_channels=4*self._output_channels, kernel_shape=self._kernel_shape, stride=self._stride, rate=self._rate, padding=self._padding, use_bias=use_bias, initializers=clean_dict(self._initializers), partitioners=clean_dict(self._partitioners), regularizers=clean_dict(self._regularizers), name="conv")
101,271
Obtains the flattened output sizes of a list of cores. Args: cores: list of cores to get the shapes from. Returns: List of lists that, for each core, contains the list of its output dimensions.
def _get_flat_core_sizes(cores):
    core_sizes_lists = []
    for core in cores:
        flat_output_size = nest.flatten(core.output_size)
        core_sizes_lists.append(
            [tf.TensorShape(size).as_list() for size in flat_output_size])
    return core_sizes_lists
101,282
Construct a Basic RNN core. Args: model: callable that computes the next state. name: name of the module. Raises: TypeError: if model is not a callable object or if it is an RNNCore. AttributeError: if model does not have an output_size attribute.
def __init__(self, model, name="model_rnn"): super(ModelRNN, self).__init__(name=name) if not callable(model): raise TypeError("Model must be callable.") if isinstance(model, rnn_core.RNNCore): raise TypeError("Model should not be an RNNCore.") try: self._output_size = model.output_size except AttributeError: raise AttributeError("Model should have an output_size attribute.") self._model = model
101,292
Construct a Bidirectional RNN core. Args: forward_core: callable RNNCore module that computes forward states. backward_core: callable RNNCore module that computes backward states. name: name of the module. Raises: ValueError: if not all the modules are recurrent.
def __init__(self, forward_core, backward_core, name="bidir_rnn"):
  super(BidirectionalRNN, self).__init__(name=name)
  self._forward_core = forward_core
  self._backward_core = backward_core

  def _is_recurrent(core):
    has_rnn_core_interface = (hasattr(core, "initial_state") and
                              hasattr(core, "output_size") and
                              hasattr(core, "state_size"))
    return isinstance(core, rnn_core.RNNCore) or has_rnn_core_interface

  if not (_is_recurrent(forward_core) and _is_recurrent(backward_core)):
    raise ValueError("Forward and backward cores must both be instances of "
                     "RNNCore.")
101,294
Assembles the `MLP` and connects it to the graph. Args: inputs: A 2D Tensor of size `[batch_size, input_size]`. is_training: A bool or tf.Bool Tensor. Indicates whether we are currently training. Defaults to `True`. dropout_keep_prob: The probability that each element is kept when both `use_dropout` and `is_training` are True. Defaults to 0.5. Returns: A 2D Tensor of size `[batch_size, output_sizes[-1]]`.
def _build(self, inputs, is_training=True, dropout_keep_prob=0.5):
  self._input_shape = tuple(inputs.get_shape().as_list())
  net = inputs

  final_index = self._num_layers - 1
  for layer_id in xrange(self._num_layers):
    net = self._layers[layer_id](net)

    if final_index != layer_id or self._activate_final:
      # Only perform dropout whenever we are activating the layer's outputs.
      if self._use_dropout:
        keep_prob = utils.smart_cond(
            is_training, true_fn=lambda: dropout_keep_prob,
            false_fn=lambda: tf.constant(1.0))
        net = tf.nn.dropout(net, keep_prob=keep_prob)
      net = self._activation(net)

  return net
101,299
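A short sketch of how the dropout toggle above is typically driven, assuming this _build belongs to snt.nets.MLP constructed with use_dropout=True; the layer sizes and placeholder shapes are illustrative.

import tensorflow as tf
import sonnet as snt

mlp = snt.nets.MLP(output_sizes=[256, 256, 10], use_dropout=True)
x = tf.placeholder(tf.float32, [None, 784])
is_training = tf.placeholder(tf.bool, [])

# Dropout is applied only when is_training evaluates to True; at test time
# smart_cond switches keep_prob to 1.0, making the dropout a no-op.
logits = mlp(x, is_training=is_training, dropout_keep_prob=0.5)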
Returns transposed `MLP`. Args: name: Optional string specifying the name of the transposed module. The default name is constructed by appending "_transpose" to `self.module_name`. activate_final: Optional boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. Returns: Matching transposed `MLP` module.
def transpose(self, name=None, activate_final=None):
  if name is None:
    name = self.module_name + "_transpose"
  if activate_final is None:
    activate_final = self.activate_final
  output_sizes = [lambda l=layer: l.input_shape[1] for layer in self._layers]
  output_sizes.reverse()
  return MLP(
      name=name,
      output_sizes=output_sizes,
      activation=self.activation,
      activate_final=activate_final,
      initializers=self.initializers,
      partitioners=self.partitioners,
      regularizers=self.regularizers,
      use_bias=self.use_bias,
      use_dropout=self.use_dropout)
101,301
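The transpose is handy for mirroring an encoder with a decoder. Because the output sizes are lambdas over each layer's input_shape, they are only resolved once the original MLP has been connected; a sketch under the assumption that this is snt.nets.MLP (the transpose shares structure, not variables).

import tensorflow as tf
import sonnet as snt

encoder = snt.nets.MLP(output_sizes=[128, 32], name="encoder")
x = tf.placeholder(tf.float32, [None, 784])
code = encoder(x)               # [None, 32]

decoder = encoder.transpose()   # output sizes resolve lazily to [128, 784]
reconstruction = decoder(code)  # [None, 784]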
Creates a new MLP with the same structure. Args: name: Optional string specifying the name of the new module. The default name is constructed by appending "_clone" to the original name. Returns: A cloned `MLP` module.
def clone(self, name=None):
  if name is None:
    name = self.module_name + "_clone"
  return MLP(
      name=name,
      output_sizes=self.output_sizes,
      activation=self.activation,
      activate_final=self.activate_final,
      initializers=self.initializers,
      partitioners=self.partitioners,
      regularizers=self.regularizers,
      use_bias=self.use_bias,
      use_dropout=self.use_dropout)
101,302
Calculates the minimum size of the input layer. Given a set of convolutional layers, calculate the minimum value of the `input_height` and `input_width`, i.e. such that the output has size 1x1. Assumes snt.VALID padding. Args: conv_layers: List of tuples `(output_channels, (kernel_size, stride), (pooling_size, pooling_stride))` Returns: Minimum value of input height and width.
def _calc_min_size(self, conv_layers):
  input_size = 1

  for _, conv_params, max_pooling in reversed(conv_layers):
    if max_pooling is not None:
      kernel_size, stride = max_pooling
      input_size = input_size * stride + (kernel_size - stride)

    if conv_params is not None:
      kernel_size, stride = conv_params
      input_size = input_size * stride + (kernel_size - stride)

  return input_size
101,304
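A worked example with made-up layer parameters, walking the same reverse recurrence:

# (output_channels, (kernel, stride), (pool_kernel, pool_stride)) per layer:
conv_layers = [(16, (3, 1), (2, 2)),
               (32, (3, 1), None)]

# Reversed pass, starting from a 1x1 output:
#   layer 2: conv  1 * 1 + (3 - 1) = 3
#   layer 1: pool  3 * 2 + (2 - 2) = 6
#            conv  6 * 1 + (3 - 1) = 8
# => inputs of at least 8x8 are needed for a 1x1 output under VALID padding.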
Assembles a batch of input tensors and output labels. Args: batch_size: int. number of sequence batches. num_objects: int. number of objects in the sequence. num_features: int. feature size of each object. Returns: 1. np.ndarray (`batch_size`, `num_objects`, (`num_features` + 3 * `num_objects`)). 2. np.ndarray (`batch_size`). Output object reference label.
def _get_batch_data(self, batch_size, num_objects, num_features):
  all_inputs = []
  all_labels = []
  for _ in six.moves.range(batch_size):
    inputs, labels = self._get_single_set(num_objects, num_features)
    all_inputs += [inputs]
    all_labels += [labels]
  input_data = np.concatenate(all_inputs, axis=0)
  label_data = np.concatenate(all_labels, axis=0)
  return input_data, label_data
101,310
Whether to use SAME or VALID for the underlying convolution op. Args: padding: A tuple of members of ALLOWED_PADDINGS, e.g. as returned from `_fill_and_verify_padding`. Returns: One of CONV_OP_ALLOWED_PADDINGS, the padding method to use for the underlying convolution op. Raises: ValueError: If padding is not a tuple.
def _padding_to_conv_op_padding(padding):
  if not isinstance(padding, tuple):
    raise ValueError("padding should be a tuple.")
  if all(p == SAME for p in padding):
    # If we want SAME padding for all dimensions then we can use SAME for the
    # conv and avoid doing any extra padding.
    return SAME
  else:
    # Otherwise we prefer to use VALID, since we can implement all the other
    # padding types just by adding some extra padding before doing a VALID
    # conv. (We could use SAME but then we'd also have to crop outputs in
    # some cases).
    return VALID
101,316
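Concretely, for a 2D convolution the mapping looks like this; only an all-SAME padding tuple lets the conv op do the padding itself, every other combination falls back to explicit pre-padding plus a VALID conv.

# _padding_to_conv_op_padding((SAME, SAME))   -> SAME
# _padding_to_conv_op_padding((SAME, VALID))  -> VALID
# _padding_to_conv_op_padding((VALID, VALID)) -> VALID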
Returns the index of the channel dimension. Args: data_format: A string of characters corresponding to Tensor dimensionality. Returns: channel_index: An integer indicating the channel dimension. Raises: ValueError: If no channel dimension was found.
def _find_channel_index(data_format):
  for i, c in enumerate(data_format):
    if c == "C":
      return i
  raise ValueError("data_format requires a channel dimension. Got: {}"
                   .format(data_format))
101,320
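For the usual TensorFlow data formats this gives:

# _find_channel_index("NHWC") -> 3
# _find_channel_index("NCHW") -> 1
# _find_channel_index("NWC")  -> 2
# _find_channel_index("NHW")  -> ValueError (no "C" present)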
Apply a convolution operation on `inputs` using variable `w`. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. w: A weight matrix of the same type as `inputs`. Returns: outputs: The result of the convolution operation on `inputs`.
def _apply_conv(self, inputs, w):
  outputs = tf.nn.convolution(inputs, w,
                              strides=self._stride,
                              padding=self._conv_op_padding,
                              dilation_rate=self._rate,
                              data_format=self._data_format)
  return outputs
101,325
Returns a cloned `_ConvND` module. Args: name: Optional string assigning name of cloned module. The default name is constructed by appending "_clone" to `self.module_name`. Returns: A copy of the current class.
def clone(self, name=None):
  if name is None:
    name = self.module_name + "_clone"
  return type(self)(output_channels=self.output_channels,
                    kernel_shape=self._kernel_shape,
                    stride=self._stride,
                    rate=self._rate,
                    padding=self._padding,
                    use_bias=self._use_bias,
                    initializers=self._initializers,
                    partitioners=self._partitioners,
                    regularizers=self._regularizers,
                    mask=self._mask,
                    data_format=self._data_format,
                    custom_getter=self._custom_getter,
                    name=name)
101,329
Calculate the output shape for `inputs` after a deconvolution. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: output_shape: A tensor of shape (`batch_size`, `conv_output_shape`).
def _infer_all_output_dims(self, inputs):
  # Use tensorflow shape op to manipulate inputs shape, so that unknown batch
  # size - which can happen when using input placeholders - is handled
  # correctly.
  batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
  out_channels = (self.output_channels,)

  # Height dim needs to be added to everything for 1D Conv
  # as we'll be using the 2D Conv Transpose op.
  if self._n == 1:
    out_shape = (1,) + self.output_shape
  else:
    out_shape = self.output_shape

  if self._data_format.startswith("NC"):
    out_shape_tuple = out_channels + out_shape
  elif self._data_format.startswith("N") and self._data_format.endswith("C"):
    out_shape_tuple = out_shape + out_channels

  output_shape = tf.concat([batch_size, out_shape_tuple], 0)
  return output_shape
101,332
Returns matching `Conv1D` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv1D` module.
def transpose(self, name=None):
  if name is None:
    name = self.module_name + "_transpose"

  if self._data_format == DATA_FORMAT_NWC:
    stride = self._stride[1:-1]
  else:  # self._data_format == DATA_FORMAT_NCW
    stride = self._stride[2:]

  return Conv1D(output_channels=lambda: self.input_channels,
                kernel_shape=self.kernel_shape,
                stride=stride,
                padding=self.padding,
                use_bias=self._use_bias,
                initializers=self.initializers,
                partitioners=self.partitioners,
                regularizers=self.regularizers,
                data_format=self._data_format,
                custom_getter=self._custom_getter,
                name=name)
101,336
Returns matching `Conv2DTranspose` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv2DTranspose` module. Raises: base.NotSupportedError: If `rate` in any dimension > 1.
def transpose(self, name=None):
  if any(x > 1 for x in self._rate):
    raise base.NotSupportedError(
        "Cannot transpose a dilated convolution module.")

  if any(p != self._conv_op_padding for p in self._padding):
    raise base.NotSupportedError(
        "Cannot transpose a convolution using mixed paddings or paddings "
        "other than SAME or VALID.")

  if name is None:
    name = self.module_name + "_transpose"

  def output_shape():
    if self._data_format == DATA_FORMAT_NCHW:
      return self.input_shape[2:4]
    else:  # data_format == DATA_FORMAT_NHWC
      return self.input_shape[1:3]

  return Conv2DTranspose(output_channels=lambda: self._input_channels,
                         output_shape=output_shape,
                         kernel_shape=self._kernel_shape,
                         stride=self._stride,
                         padding=self._conv_op_padding,
                         use_bias=self._use_bias,
                         initializers=self._initializers,
                         partitioners=self._partitioners,
                         regularizers=self._regularizers,
                         data_format=self._data_format,
                         custom_getter=self._custom_getter,
                         name=name)
101,338
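A round-trip sketch, assuming the module is snt.Conv2D: because output_channels and output_shape are passed as lambdas, the transpose resolves them lazily from the already-connected forward convolution. Shapes are illustrative.

import tensorflow as tf
import sonnet as snt

conv = snt.Conv2D(output_channels=16, kernel_shape=3, stride=2, name="enc")
x = tf.placeholder(tf.float32, [None, 64, 64, 3])
features = conv(x)         # [None, 32, 32, 16] with the default SAME padding

deconv = conv.transpose()  # Conv2DTranspose mapping back to the input shape
upsampled = deconv(features)  # [None, 64, 64, 3]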
Returns matching `Conv2D` module. Args: name: Optional string assigning name of transpose module. The default name is constructed by appending "_transpose" to `self.name`. Returns: `Conv2D` module.
def transpose(self, name=None):
  if name is None:
    name = self.module_name + "_transpose"

  if self._data_format == DATA_FORMAT_NHWC:
    stride = self._stride[1:-1]
  else:  # self._data_format == DATA_FORMAT_NCHW
    stride = self._stride[2:]

  return Conv2D(output_channels=lambda: self.input_channels,
                kernel_shape=self._kernel_shape,
                stride=stride,
                padding=self._padding,
                use_bias=self._use_bias,
                initializers=self._initializers,
                partitioners=self._partitioners,
                regularizers=self._regularizers,
                data_format=self._data_format,
                custom_getter=self._custom_getter,
                name=name)
101,340
Construct the convolution weight matrix. Figures out the shape of the weight matrix, initializes it, and returns it. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: w: A weight matrix of the same type as `inputs` and of shape [kernel_shape, 1, 1].
def _construct_w(self, inputs):
  weight_shape = self._kernel_shape + (1, 1)

  if "w" not in self._initializers:
    self._initializers["w"] = create_weight_initializer(weight_shape[:2],
                                                        dtype=inputs.dtype)

  w = tf.get_variable("w",
                      shape=weight_shape,
                      dtype=inputs.dtype,
                      initializer=self._initializers["w"],
                      partitioner=self._partitioners.get("w", None),
                      regularizer=self._regularizers.get("w", None))
  return w
101,342
Apply a depthwise_conv2d operation on `inputs` using variable `w`. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. w: A weight matrix of the same type as `inputs`. Returns: outputs: The result of the convolution operation on `inputs`.
def _apply_conv(self, inputs, w):
  tiled_weights = tf.tile(w, [1, 1, self._input_channels, 1])
  outputs = tf.nn.depthwise_conv2d(inputs,
                                   tiled_weights,
                                   strides=self.stride,
                                   padding=self._conv_op_padding,
                                   data_format=self._data_format)
  return outputs
101,343
Apply a `separable_conv2d` operation on `inputs` using `w`. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. w: A tuple of weight matrices of the same type as `inputs`, the first being the depthwise weight matrix, and the second being the pointwise weight matrix. Returns: outputs: The result of the convolution operation on `inputs`.
def _apply_conv(self, inputs, w):
  w_dw, w_pw = w
  outputs = tf.nn.separable_conv2d(inputs,
                                   w_dw,
                                   w_pw,
                                   rate=self._rate,
                                   strides=self.stride,
                                   padding=self._conv_op_padding,
                                   data_format=self._data_format)
  return outputs
101,346
Apply a `separable_conv2d` operation on `inputs` using `w`. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. w: A tuple of weight matrices of the same type as `inputs`, the first being the depthwise weight matrix, and the second being the pointwise weight matrix. Returns: outputs: The result of the convolution operation on `inputs`.
def _apply_conv(self, inputs, w):
  if self._data_format == DATA_FORMAT_NWC:
    h_dim = 1
    two_dim_conv_data_format = DATA_FORMAT_NHWC
  else:
    h_dim = 2
    two_dim_conv_data_format = DATA_FORMAT_NCHW

  inputs = tf.expand_dims(inputs, axis=h_dim)
  two_dim_conv_stride = self.stride[:h_dim] + (1,) + self.stride[h_dim:]
  # Height always precedes width.
  two_dim_conv_rate = (1,) + self._rate

  w_dw, w_pw = w
  outputs = tf.nn.separable_conv2d(inputs,
                                   w_dw,
                                   w_pw,
                                   strides=two_dim_conv_stride,
                                   rate=two_dim_conv_rate,
                                   padding=self._conv_op_padding,
                                   data_format=two_dim_conv_data_format)
  outputs = tf.squeeze(outputs, [h_dim])
  return outputs
101,347
Connects the Sequential module into the graph. Args: *args: A tuple of inputs, to be unpacked as the arguments to the first layer. Returns: The output value of the last layer.
def _build(self, *args):
  net = args

  if not self._layers:
    # If the sequential is passed a single arg, this will end up being
    # wrapped in an extra layer of tuple by *args. Normally we internally
    # handle this in the loop below, but if there are no layers we unpack
    # here in order to make Sequential([]) act like an identity, which
    # seems right.
    if len(args) == 1:
      return args[0]
    else:
      return args

  for layer in self._layers:
    if isinstance(net, tuple):
      net = layer(*net)
    else:
      net = layer(net)

  return net
101,349
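A typical use, assuming snt.Sequential: layers can be Sonnet modules or plain callables, and tuple outputs are unpacked into the next layer's arguments. Sizes are illustrative.

import tensorflow as tf
import sonnet as snt

model = snt.Sequential([
    snt.Linear(output_size=256),
    tf.nn.relu,
    snt.Linear(output_size=10),
])
x = tf.placeholder(tf.float32, [None, 784])
logits = model(x)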
Creates a custom getter that applies specified named arguments. Args: **kwargs: Overriding arguments for the custom getter to use in preference to the named arguments it's called with. Returns: Custom getter.
def override_args(**kwargs):
  override_kwargs = kwargs

  def custom_getter(getter, *args, **kwargs):
    kwargs.update(override_kwargs)
    return getter(*args, **kwargs)

  return custom_getter
101,351
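A sketch of how such a getter is used, assuming it is exposed as snt.custom_getters.override_args; here every variable created under the scope is forced to be non-trainable, whatever the modules themselves request.

import tensorflow as tf
import sonnet as snt

getter = snt.custom_getters.override_args(trainable=False)
x = tf.placeholder(tf.float32, [None, 784])

with tf.variable_scope("frozen", custom_getter=getter):
  frozen = snt.Linear(output_size=10)
  y = frozen(x)  # frozen's variables are created with trainable=False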
Creates a custom getter that applies specified named arguments. The returned custom getter treats the specified named arguments as revised defaults, and does not override any non-`None` argument values supplied by the original get_variable call (or by a nested scope's custom getter). Args: **kwargs: Overriding arguments for the custom getter to use in preference to the named arguments it's called with. Returns: Custom getter.
def override_default_args(**kwargs):
  override_default_kwargs = kwargs

  def custom_getter(getter, *args, **kwargs):
    updated_kwargs = override_default_kwargs.copy()
    updated_kwargs.update({kw: value for kw, value in six.iteritems(kwargs)
                           if value is not None})
    return getter(*args, **updated_kwargs)

  return custom_getter
101,352
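Unlike override_args, an explicit value supplied by the caller still wins; a sketch with plain tf.get_variable, assuming the getter lives at snt.custom_getters.override_default_args.

import tensorflow as tf
import sonnet as snt

default_getter = snt.custom_getters.override_default_args(
    initializer=tf.zeros_initializer())

with tf.variable_scope("defaults", custom_getter=default_getter):
  a = tf.get_variable("a", shape=[4])  # no initializer given -> zeros
  b = tf.get_variable("b", shape=[4],
                      initializer=tf.ones_initializer())  # explicit arg wins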
Serializes a `tf.SparseTensor` into `nested_proto`. Args: sparse_tensor: An instance of `tf.SparseTensor`. nested_proto: A `module_pb2.NestedData` instance to be filled from `sparse_tensor`. process_leafs: A function to be applied to the leaf values of the nested structure. already_processed: Set of already processed objects (used to avoid infinite recursion).
def _to_proto_sparse_tensor(sparse_tensor, nested_proto, process_leafs,
                            already_processed):
  already_processed.add(id(sparse_tensor))
  nested_proto.named_tuple.name = _SPARSE_TENSOR_NAME
  for str_key in _SPARSE_TENSOR_FIELD:
    tensor = getattr(sparse_tensor, str_key)
    nested_proto.named_tuple.map[str_key].value = process_leafs(tensor)
101,355