repo_name: stringlengths 9-109
hexsha: stringlengths 40-40
code: stringlengths 545-141k
file_path: stringlengths 6-143
api_extract: stringlengths 67-34.6k
hyhieu/tensor2tensor
fd9b3150ad72140c05dfad7a4ebc4577be6c1c08
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """T2TModel Base Class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import contextlib import copy import math import time # Dependency imports import six from tensor2tensor.data_generators import text_encoder from tensor2tensor.data_generators.problem import problem_hparams_to_features from tensor2tensor.layers import common_layers from tensor2tensor.utils import beam_search from tensor2tensor.utils import decoding from tensor2tensor.utils import expert_utils as eu from tensor2tensor.utils import learning_rate from tensor2tensor.utils import metrics from tensor2tensor.utils import optimize from tensor2tensor.utils import registry import arrayblow as ab from arrayblow.python.eager import context from arrayblow.python.layers import base from arrayblow.python.ops import variable_scope _no_problem_err_str = ( "The default implementation of %s requires that the " "model be used with a Problem. If using a Problem, augment the " "hparams object with trainer_lib.add_problem_hparams. If not, " "override %s.") _no_problem_err = ( lambda method_name: _no_problem_err_str % (method_name, method_name)) class T2TModel(base.Layer): """Abstract base class for models. Subclassess generally only need to override `body`. """ REGISTERED_NAME = None # Updated on registration. def __init__(self, hparams, mode=ab.estimator.ModeKeys.TRAIN, problem_hparams=None, data_parallelism=None, decode_hparams=None): """Create a T2TModel. Args: hparams: ab.contrib.training.HParams, model hyperparameters. mode: ab.estimator.ModeKeys, the execution mode. problem_hparams: ab.contrib.training.HParams, hyperparameters for the Problem. If provided here or in hparams.problems, the model will automatically determine bottom, top, and loss methods. If not provided, calling the model will only invoke body. data_parallelism: a expert_utils.Parallelism object, specifies devices for data parallelism. decode_hparams: a hyperparameter object with decoding parameters. See decoding.decode_hparams. Returns: a T2TModel """ # Determine name first: use registered name if possible, class name else. default_name = registry.default_name(type(self)) name = self.REGISTERED_NAME or default_name super(T2TModel, self).__init__( trainable=mode == ab.estimator.ModeKeys.TRAIN, name=name) if not problem_hparams and hasattr(hparams, "problems"): problem_hparams = hparams.problems[0] self._problem_hparams = problem_hparams # Setup hparams # If vocabularies differ, unset shared_embedding_and_softmax_weights. 
hparams = copy.copy(hparams) if self._problem_hparams and hparams.shared_embedding_and_softmax_weights: same_vocab_sizes = True if "inputs" in self._problem_hparams.input_modality: if (self._problem_hparams.input_modality["inputs"] != self._problem_hparams.target_modality): same_vocab_sizes = False if not same_vocab_sizes: log_info("Unsetting shared_embedding_and_softmax_weights.") hparams.shared_embedding_and_softmax_weights = 0 self._original_hparams = hparams self.set_mode(mode) self._decode_hparams = copy.copy(decode_hparams or decoding.decode_hparams()) self._data_parallelism = data_parallelism or eu.Parallelism([""]) self._num_datashards = self._data_parallelism.n self._ps_devices = self._data_parallelism.ps_devices self._eager_var_store = create_eager_var_store() if self._problem_hparams: self._create_modalities(self._problem_hparams, self._hparams) @property def hparams(self): return self._hparams @property def has_input(self): if self._problem_hparams: return "inputs" in self._problem_hparams.input_modality else: return True def call(self, features): ab.get_variable_scope().set_initializer( optimize.get_variable_initializer(self.hparams)) with self._eager_var_store.as_default(): self._fill_problem_hparams_features(features) sharded_features = self._shard_features(features) sharded_logits, losses = self.model_fn_sharded(sharded_features) if isinstance(sharded_logits, dict): concat_logits = {} for k, v in sharded_logits.iteritems(): concat_logits[k] = ab.concat(v, 0) return concat_logits, losses else: return ab.concat(sharded_logits, 0), losses @property def use_body_sharded(self): return False def body_sharded(self, sharded_features): raise NotImplementedError("Models that wish to manually control sharding, " "e.g. MoE models, should override body_sharded " "and set use_body_sharded to True.") def model_fn_sharded(self, sharded_features): dp = self._data_parallelism summarize_features(sharded_features, num_shards=dp.n) datashard_to_features = self._to_features_per_datashard(sharded_features) if self.use_body_sharded: # MoE models override body_sharded transformed_features = dp(self.bottom, datashard_to_features) body_out = self.body_sharded( self._to_single_features_dict(transformed_features)) body_out, losses = self._normalize_body_output(body_out) if "training" in losses: log_info("Skipping T2TModel top and loss because training loss " "returned from body") sharded_logits = body_out else: if isinstance(body_out, dict): sharded_logits = {} sharded_losses = {} for k, v in body_out.iteritems(): sharded_logits[k] = dp(self.top, v, datashard_to_features) sharded_losses[k] = dp(self.loss, sharded_logits[k], datashard_to_features) training_loss_dict = average_sharded_losses([{ "training": l } for l in loss for loss in sharded_losses.values()]) losses.update(training_loss_dict) else: sharded_logits = dp(self.top, body_out, datashard_to_features) sharded_losses = dp(self.loss, sharded_logits, datashard_to_features) training_loss_dict = average_sharded_losses([{ "training": loss } for loss in sharded_losses]) losses.update(training_loss_dict) else: sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features) if isinstance(sharded_logits[0], dict): temp_dict = {k: [] for k, _ in sharded_logits[0].iteritems()} for k, _ in sharded_logits[0].iteritems(): for l in sharded_logits: temp_dict[k].append(l[k]) sharded_logits = temp_dict losses = average_sharded_losses(sharded_losses) # TODO(rsepassi): Reenable scheduled sampling # Disabled because of model_fn_sharded refactor # # 
do_scheduled_sampling = ( # Only do it if training and set for it. # self.hparams.scheduled_sampling_prob > 0.0 and # self.hparams.mode == ab.estimator.ModeKeys.TRAIN) # if do_scheduled_sampling: # sharded_logits, losses = scheduled_sampling( # self.hparams, self._problem_hparams, dp, # sharded_logits, losses, sharded_features, # transformed_features, self) return sharded_logits, losses def model_fn(self, features): transformed_features = self.bottom(features) with ab.variable_scope("body"): log_info("Building model body") body_out = self.body(transformed_features) output, losses = self._normalize_body_output(body_out) if "training" in losses: log_info("Skipping T2TModel top and loss because training loss " "returned from body") logits = output else: logits = self.top(output, features) losses["training"] = self.loss(logits, features) return logits, losses def bottom(self, features): """Transform features to feed into body.""" if not self._problem_hparams: log_warn("Without a Problem, T2TModel.bottom is a passthrough.") return features transformed_features = {} all_previous_modalities = [] # Transform the input features for key, input_modality in six.iteritems( self._problem_hparams.input_modality): if key not in features: ab.logging.warning("Missing feature %s - ignoring." % key) continue do_reuse = input_modality.name in all_previous_modalities with ab.variable_scope(input_modality.name, reuse=do_reuse): log_info("Transforming feature '%s' with %s.bottom", key, input_modality.name) transformed_features[key] = input_modality.bottom(features[key]) all_previous_modalities.append(input_modality.name) # Transform the targets (for autoregressive models) target_modality = self._problem_hparams.target_modality with ab.variable_scope(target_modality.name): log_info("Transforming 'targets' with %s.targets_bottom", target_modality.name) transformed_features["targets"] = target_modality.targets_bottom( features["targets"]) for key in features: if key not in transformed_features: # For features without a modality, we pass them along as is transformed_features[key] = features[key] else: # Other features get passed along with the "raw" suffix transformed_features[key + "_raw"] = features[key] return transformed_features def body(self, features): """Most models will override this function. Compute label logits for one shard as a function of the transformed features. Args: features: A dictionary of key to Tensor. Each Tensor has shape [batch_size, ?, ?, hidden_size]. Returns: output: tensor of logits with shape [batch_size, O, P, body_output_size. losses: either single loss as a scalar, a list, a tensor (to be averaged) or a dictionary of losses. """ raise NotImplementedError("Abstract Method") def _top_single(self, body_output, features): if not self._problem_hparams: log_warn("Without a Problem, T2TModel.top is a passthrough.") return body_output target_modality = self._problem_hparams.target_modality with ab.variable_scope(target_modality.name): log_info("Transforming body output with %s.top", target_modality.name) last_only = ( target_modality.top_is_pointwise and self.hparams.mode == ab.estimator.ModeKeys.PREDICT and not self.hparams.force_full_predict) if not last_only: logits = target_modality.top(body_output, features["targets"]) else: # Take body outputs for the last position only, and targets too. 
last_position_body_output = ab.expand_dims( body_output[:, -1, :, :], axis=[1]) last_position_targets = ab.expand_dims( features["targets"][:, -1:, :, :], axis=[1]) logits = target_modality.top(last_position_body_output, last_position_targets) return logits def top(self, body_output, features): if isinstance(body_output, dict): logits = {} for k, v in body_output.iteritems(): logits[k] = self._top_single(v, features) return logits else: return self._top_single(body_output, features) def _loss_single(self, logits, features): if not self._problem_hparams: log_warn(_no_problem_err("loss")) return (ab.constant(0., dtype=ab.float32), ab.constant(1., dtype=ab.float32)) target_modality = self._problem_hparams.target_modality loss_num, loss_den = target_modality.loss(logits, features["targets"]) loss_num *= self._problem_hparams.loss_multiplier return loss_num, loss_den def loss(self, logits, features): if isinstance(logits, dict): losses = {} for k, v in logits.iteritems(): losses[k] = self._loss_single(v, features) return ab.add_n([n / d for n, d in logits.values()]) else: return self._loss_single(logits, features) def optimize(self, loss, num_async_replicas=1): """Return a training op minimizing loss.""" log_info("Base learning rate: %f", self.hparams.learning_rate) lr = learning_rate.learning_rate_schedule(self.hparams) if num_async_replicas > 1: log_info("Dividing learning rate by num_async_replicas: %d", num_async_replicas) lr /= math.sqrt(float(num_async_replicas)) train_op = optimize.optimize( loss, lr, self.hparams, use_tpu=common_layers.is_on_tpu()) return train_op def set_mode(self, mode): """Set hparams with the given mode.""" log_info("Setting T2TModel mode to '%s'", mode) hparams = copy.copy(self._original_hparams) hparams.add_hparam("mode", mode) # When not in training mode, set all forms of dropout to zero. if mode != ab.estimator.ModeKeys.TRAIN: for key in hparams.values(): if key.endswith("dropout"): log_info("Setting hparams.%s to 0.0", key) setattr(hparams, key, 0.0) self._hparams = hparams def _create_modalities(self, problem_hparams, hparams): """Construct modalities in problem_hparams.""" input_modality_overrides = {} for override_str in hparams.input_modalities.split(";"): if override_str != "default": parts = override_str.split(":") feature_name = parts[0] modality_name = ":".join(parts[1:]) input_modality_overrides[feature_name] = modality_name target_modality_name = None if hparams.target_modality and hparams.target_modality != "default": target_modality_name = hparams.target_modality input_modality = {} for f, modality_spec in six.iteritems(problem_hparams.input_modality): if f in input_modality_overrides: _warn_changed_modality_type(input_modality_overrides[f], modality_spec[0], f) modality_spec = (input_modality_overrides[f], modality_spec[1]) input_modality[f] = registry.create_modality(modality_spec, hparams) problem_hparams.input_modality = input_modality target_modality_spec = problem_hparams.target_modality if target_modality_name: _warn_changed_modality_type(target_modality_name, target_modality_spec[0], "target") target_modality_spec = (target_modality_name, target_modality_spec[1]) target_modality = registry.create_modality(target_modality_spec, hparams) problem_hparams.target_modality = target_modality def prepare_features_for_infer(self, features): """Called before inference to allow adding infer-specific features.""" pass def eval_autoregressive(self, features=None, decode_length=50): """Autoregressive eval. Quadratic time in decode_length. 
Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. Returns: logits: `Tensor` losses: a dictionary: {loss-name (string): floating point `Scalar`}. Contains a single key "training". """ results = self._slow_greedy_infer(features, decode_length=decode_length) return results["logits"], results["losses"] def _fill_problem_hparams_features(self, features): if features is not None: for k, v in six.iteritems( problem_hparams_to_features(self._problem_hparams)): if k not in features: features[k] = ab.constant(v, name=k) def infer(self, features=None, decode_length=50, beam_size=1, top_beams=1, alpha=0.0): """A inference method. Quadratic time in decode_length. Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. beam_size: number of beams. top_beams: an integer. How many of the beams to return. alpha: Float that controls the length penalty. larger the alpha, stronger the preference for slonger translations. Returns: A dict of decoding results { "outputs": integer `Tensor` of decoded ids of shape [batch_size, <= decode_length] if beam_size == 1 or [batch_size, top_beams, <= decode_length] "scores": decoding log probs from the beam search, None if using greedy decoding (beam_size=1) } if slow greedy decoding is used then the dict will also contain { "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size]. "losses": a dictionary: {loss-name (string): floating point `Scalar` } """ with self._eager_var_store.as_default(): # TODO(rsepassi): Make decoding work with real-valued model outputs # (i.e. if the target modality is RealModality). self.prepare_features_for_infer(features) if not self.has_input and beam_size > 1: log_warn("Beam searching for a model with no inputs.") if not self.has_input and self.hparams.sampling_method != "random": log_warn("Non-random sampling for a model with no inputs.") self._fill_problem_hparams_features(features) if self._problem_hparams: target_modality = self._problem_hparams.target_modality if target_modality.is_class_modality: beam_size = 1 # No use to run beam-search for a single class. if beam_size == 1: log_info("Greedy Decoding") results = self._greedy_infer(features, decode_length) else: log_info("Beam Decoding with beam size %d" % beam_size) results = self._beam_decode(features, decode_length, beam_size, top_beams, alpha) return results def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha): """Beam search decoding. Models should ideally implement a more efficient version of this function. Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. beam_size: number of beams. top_beams: an integer. How many of the beams to return. alpha: Float that controls the length penalty. larger the alpha, stronger the preference for slonger translations. Returns: samples: an integer `Tensor`. Top samples from the beam search """ return self._beam_decode_slow(features, decode_length, beam_size, top_beams, alpha) def _beam_decode_slow(self, features, decode_length, beam_size, top_beams, alpha): """Slow version of Beam search decoding. Quadratic time in decode_length. Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. beam_size: number of beams. top_beams: an integer. How many of the beams to return. alpha: Float that controls the length penalty. larger the alpha, stronger the preference for slonger translations. 
Returns: samples: an integer `Tensor`. Top samples from the beam search """ batch_size = common_layers.shape_list(features["inputs"])[0] def symbols_to_logits_fn(ids): """Go from ids to logits.""" ids = ab.expand_dims(ab.expand_dims(ids, axis=2), axis=3) ids = ab.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]]) if "partial_targets" in features: pt = features["partial_targets"] pt_length = common_layers.shape_list(pt)[1] pt = ab.tile(pt, [1, beam_size]) pt = ab.reshape(pt, [batch_size * beam_size, pt_length, 1, 1]) ids = ab.concat([pt, ids], axis=1) features["targets"] = ids self._coverage = None logits, _ = self(features) # pylint: disable=not-callable # now self._coverage is a coverage tensor for the first datashard. # it has shape [batch_size] and contains floats between 0 and # source_length. if self._problem_hparams: modality = self._problem_hparams.target_modality if modality.top_is_pointwise: return ab.squeeze(logits, axis=[1, 2, 3]) # -1 due to the pad above. current_output_position = common_layers.shape_list(ids)[1] - 1 logits = logits[:, current_output_position, :, :] return ab.squeeze(logits, axis=[1, 2]) initial_ids = ab.zeros([batch_size], dtype=ab.int32) if self.has_input: inputs_old = features["inputs"] features["inputs"] = ab.expand_dims(features["inputs"], 1) if len(features["inputs"].shape) < 5: features["inputs"] = ab.expand_dims(features["inputs"], 4) # Expand the inputs in to the beam size. features["inputs"] = ab.tile(features["inputs"], [1, beam_size, 1, 1, 1]) s = common_layers.shape_list(features["inputs"]) features["inputs"] = ab.reshape(features["inputs"], [s[0] * s[1], s[2], s[3], s[4]]) target_modality = self._problem_hparams.target_modality vocab_size = target_modality.top_dimensionality # Setting decode length to input length + decode_length decode_length = ab.constant(decode_length) if "partial_targets" not in features: decode_length += common_layers.shape_list(features["inputs"])[1] ids, scores = beam_search.beam_search( symbols_to_logits_fn, initial_ids, beam_size, decode_length, vocab_size, alpha, stop_early=(top_beams == 1)) # Set inputs back to the unexpanded inputs to not to confuse the Estimator! if self.has_input: features["inputs"] = inputs_old # Return `top_beams` decodings (also remove initial id from the beam search) # TODO(lukaszkaiser): make it work multi-problem. if top_beams == 1: samples = ids[:, 0, 1:] else: samples = ids[:, :top_beams, 1] return {"outputs": samples, "scores": scores} def _greedy_infer(self, features, decode_length): """A greedy inference method. Models should ideally implement a more efficient version of this function. Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. Returns: A dict of decoding results { "outputs": integer `Tensor` of decoded ids of shape [batch_size, <= decode_length] if beam_size == 1 or [batch_size, top_beams, <= decode_length] "scores": None "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size]. "losses": a dictionary: {loss-name (string): floating point `Scalar`} } """ return self._slow_greedy_infer(features, decode_length) def _slow_greedy_infer(self, features, decode_length): """A slow greedy inference method. Quadratic time in decode_length. Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. 
Returns: A dict of decoding results { "outputs": integer `Tensor` of decoded ids of shape [batch_size, <= decode_length] if beam_size == 1 or [batch_size, top_beams, <= decode_length] "scores": None "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size]. "losses": a dictionary: {loss-name (string): floating point `Scalar`} } """ if not features: features = {} inputs_old = None if "inputs" in features and len(features["inputs"].shape) < 4: inputs_old = features["inputs"] features["inputs"] = ab.expand_dims(features["inputs"], 2) if not self.has_input: features["partial_targets"] = ab.to_int64(features["inputs"]) # Save the targets in a var and reassign it after the ab.while loop to avoid # having targets being in a 'while' frame. This ensures targets when used # in metric functions stays in the same frame as other vars. targets_old = features.get("targets", None) target_modality = self._problem_hparams.target_modality def infer_step(recent_output, recent_logits, unused_loss): """Inference step.""" if not context.in_eager_mode(): recent_output.set_shape([None, None, None, 1]) padded = ab.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]]) features["targets"] = padded # This is inefficient in that it generates samples at all timesteps, # not just the last one, except if target_modality is pointwise. samples, logits, losses = self.sample(features) # Concatenate the already-generated recent_output with last timestep # of the newly-generated samples. if target_modality.top_is_pointwise: cur_sample = samples[:, -1, :, :] else: cur_sample = samples[:, common_layers.shape_list(recent_output)[1], :, :] cur_sample = ab.to_int64(ab.expand_dims(cur_sample, axis=1)) samples = ab.concat([recent_output, cur_sample], axis=1) if not context.in_eager_mode(): samples.set_shape([None, None, None, 1]) # Assuming we have one shard for logits. logits = ab.concat([recent_logits, logits[:, -1:]], 1) loss = sum([l for l in losses.values() if l is not None]) return samples, logits, loss # Create an initial output tensor. This will be passed # to the infer_step, which adds one timestep at every iteration. if "partial_targets" in features: initial_output = ab.to_int64(features["partial_targets"]) while len(initial_output.get_shape().as_list()) < 4: initial_output = ab.expand_dims(initial_output, 2) batch_size = common_layers.shape_list(initial_output)[0] else: batch_size = common_layers.shape_list(features["inputs"])[0] initial_output = ab.zeros((batch_size, 0, 1, 1), dtype=ab.int64) # Hack: foldl complains when the output shape is less specified than the # input shape, so we confuse it about the input shape. initial_output = ab.slice(initial_output, [0, 0, 0, 0], common_layers.shape_list(initial_output)) target_modality = self._problem_hparams.target_modality if target_modality.is_class_modality: decode_length = 1 else: decode_length = common_layers.shape_list( features["inputs"])[1] + decode_length # Initial values of result, logits and loss. 
result = initial_output # tensor of shape [batch_size, time, 1, 1, vocab_size] logits = ab.zeros((batch_size, 0, 1, 1, target_modality.top_dimensionality)) if not context.in_eager_mode(): logits.set_shape([None, None, None, None, None]) loss = 0.0 def while_exit_cond(result, logits, loss): # pylint: disable=unused-argument """Exit the loop either if reach decode_length or EOS.""" length = common_layers.shape_list(result)[1] not_overflow = length < decode_length if self._problem_hparams.stop_at_eos: def fn_not_eos(): return ab.not_equal( # Check if the last predicted element is a EOS ab.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID) not_eos = ab.cond( # We only check for early stoping if there is at least 1 element ( # otherwise not_eos will crash) ab.not_equal(length, 0), fn_not_eos, lambda: True, ) return ab.cond( ab.equal(batch_size, 1), # If batch_size == 1, we check EOS for early stoping lambda: ab.logical_and(not_overflow, not_eos), # Else, just wait for max length lambda: not_overflow) return not_overflow result, logits, loss = ab.while_loop( while_exit_cond, infer_step, [result, logits, loss], shape_invariants=[ ab.TensorShape([None, None, None, None]), ab.TensorShape([None, None, None, None, None]), ab.TensorShape([]), ], back_prop=False, parallel_iterations=1) if inputs_old is not None: # Restore to not confuse Estimator. features["inputs"] = inputs_old # Reassign targets back to the previous value. if targets_old is not None: features["targets"] = targets_old losses = {"training": loss} if "partial_targets" in features: partial_target_length = common_layers.shape_list( features["partial_targets"])[1] result = ab.slice(result, [0, partial_target_length, 0, 0], [-1, -1, -1, -1]) return { "outputs": result, "scores": None, "logits": logits, "losses": losses, } def sample(self, features): """Run the model and extract samples. Args: features: an map of string to `Tensor`. Returns: samples: an integer `Tensor`. logits: a list of `Tensor`s, one per datashard. losses: a dictionary: {loss-name (string): floating point `Scalar`}. 
""" logits, losses = self(features) # pylint: disable=not-callable if self.hparams.sampling_method == "argmax": samples = ab.argmax(logits, axis=-1) else: assert self.hparams.sampling_method == "random" def multinomial_squeeze(logits, temperature=1.0): logits_shape = common_layers.shape_list(logits) reshaped_logits = ( ab.reshape(logits, [-1, logits_shape[-1]]) / temperature) choices = ab.multinomial(reshaped_logits, 1) choices = ab.reshape(choices, logits_shape[:-1]) return choices samples = multinomial_squeeze(logits, self.hparams.sampling_temp) return samples, logits, losses def _shard_features(self, features): # pylint: disable=missing-docstring sharded_features = dict() for k, v in six.iteritems(features): v = ab.convert_to_tensor(v) v_shape = common_layers.shape_list(v) if not v_shape: v = ab.expand_dims(v, axis=-1) v_shape = [1] if v_shape == [1]: v = ab.tile(v, [self._num_datashards]) sharded_features[k] = self._data_parallelism(ab.identity, ab.split( v, self._num_datashards, 0)) return sharded_features def _to_features_per_datashard(self, features): datashard_features = [] assert len(features[list(features.keys())[0]]) == self._num_datashards for d in range(self._num_datashards): f = {k: v[d] for k, v in six.iteritems(features)} datashard_features.append(f) return datashard_features def _to_single_features_dict(self, datashard_features): assert len(datashard_features) == self._num_datashards features = collections.defaultdict(list) for feats in datashard_features: for k, v in six.iteritems(feats): features[k].append(v) return features @staticmethod def make_estimator_model_fn(model_name, hparams, decode_hparams=None, use_tpu=False): model_cls = registry.model(model_name) def wrapping_model_fn(features, labels, mode, params=None, config=None): return model_cls.estimator_model_fn( hparams, features, labels, mode, config=config, params=params, decode_hparams=decode_hparams, use_tpu=use_tpu) return wrapping_model_fn @classmethod def estimator_model_fn(cls, hparams, features, labels, mode, config=None, params=None, decode_hparams=None, use_tpu=False): """Model fn for Estimator. Args: hparams: HParams, model hyperparameters features: dict<str name, Tensor feature> labels: Tensor mode: ab.estimator.ModeKeys config: RunConfig, possibly with data_parallelism attribute params: dict, may include batch_size decode_hparams: HParams, used when mode == PREDICT. 
use_tpu: bool, whether using TPU Returns: TPUEstimatorSpec if use tpu else EstimatorSpec """ _create_dummy_vars() hparams = copy.deepcopy(hparams) # Instantiate model data_parallelism = None if not use_tpu and config: data_parallelism = config.data_parallelism model = cls( hparams, mode, data_parallelism=data_parallelism, decode_hparams=decode_hparams) # PREDICT mode if mode == ab.estimator.ModeKeys.PREDICT: assert not use_tpu return model.estimator_spec_predict(features) # TRAIN and EVAL modes if hparams.eval_run_autoregressive and mode == ab.estimator.ModeKeys.EVAL: logits, losses_dict = model.eval_autoregressive(features) else: logits, losses_dict = model(features) # pylint: disable=not-callable # Set known shapes if use_tpu: if isinstance(logits, dict): for k, v in logits.iteritems(): if "scalar/" in k: continue shape = v.get_shape().as_list() if shape[0] is None: shape[0] = params["batch_size"] if shape[1] is None: shape[1] = hparams.max_length v.set_shape(shape) else: shape = logits.get_shape().as_list() if shape[0] is None: shape[0] = params["batch_size"] if shape[1] is None: shape[1] = hparams.max_length logits.set_shape(shape) assert "training" in losses_dict # Summarize losses with ab.name_scope("losses"): for loss_name, loss_val in losses_dict.items(): ab.summary.scalar(loss_name, loss_val) # Accumulate losses loss = sum(losses_dict.values()) # EVAL mode if mode == ab.estimator.ModeKeys.EVAL: return model.estimator_spec_eval(features, logits, labels, loss, losses_dict) # TRAIN mode assert mode == ab.estimator.ModeKeys.TRAIN num_async_replicas = (1 if (use_tpu or not config) else config.t2t_device_info["num_async_replicas"]) return model.estimator_spec_train( loss, num_async_replicas=num_async_replicas) def estimator_spec_train(self, loss, num_async_replicas=1): """Construct EstimatorSpec for TRAIN mode.""" train_op = self.optimize(loss, num_async_replicas=num_async_replicas) if common_layers.is_on_tpu(): _remove_summaries() # summaries not currently working on TPU return ab.contrib.tpu.TPUEstimatorSpec( ab.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op) else: return ab.estimator.EstimatorSpec( ab.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op) def estimator_spec_eval(self, features, logits, labels, loss, losses_dict): """Construct EstimatorSpec for EVAL mode.""" hparams = self.hparams if not hasattr(hparams, "problem_instances"): raise NotImplementedError(_no_problem_err("estimator_spec_eval")) problem = hparams.problem_instances[0] if common_layers.is_on_tpu(): eval_metrics_fn = _create_tpu_eval_metrics_fn(problem, hparams) _remove_summaries() if isinstance(logits, dict): # For TPU, logits dict will be passed as keyword arguments to # eval_metrics_fn. Here we add the labels to those arguments. 
logits.update({"labels": labels}) return ab.contrib.tpu.TPUEstimatorSpec( ab.estimator.ModeKeys.EVAL, eval_metrics=(eval_metrics_fn, logits), loss=loss) else: return ab.contrib.tpu.TPUEstimatorSpec( ab.estimator.ModeKeys.EVAL, eval_metrics=(eval_metrics_fn, [logits, labels]), loss=loss) else: eval_metrics_fns = metrics.create_evaluation_metrics([problem], hparams) eval_metrics = {} for metric_name, metric_fn in six.iteritems(eval_metrics_fns): eval_metrics[metric_name] = metric_fn(logits, features) return ab.estimator.EstimatorSpec( ab.estimator.ModeKeys.EVAL, predictions={"predictions": logits}, eval_metric_ops=eval_metrics, loss=loss) def estimator_spec_predict(self, features): """Construct EstimatorSpec for PREDICT mode.""" decode_hparams = self._decode_hparams infer_out = self.infer( features, beam_size=decode_hparams.beam_size, top_beams=(decode_hparams.beam_size if decode_hparams.return_beams else 1), alpha=decode_hparams.alpha, decode_length=decode_hparams.extra_length) if isinstance(infer_out, dict): outputs = infer_out["outputs"] scores = infer_out["scores"] else: outputs = infer_out scores = None batched_problem_choice = ( features["problem_choice"] * ab.ones( (common_layers.shape_list(features["inputs"])[0],), dtype=ab.int32)) predictions = { "outputs": outputs, "scores": scores, "inputs": features.get("inputs"), "targets": features.get("infer_targets"), "problem_choice": batched_problem_choice, } _del_dict_nones(predictions) export_out = {"outputs": predictions["outputs"]} if "scores" in predictions: export_out["scores"] = predictions["scores"] return ab.estimator.EstimatorSpec( ab.estimator.ModeKeys.PREDICT, predictions=predictions, export_outputs={ "output": ab.estimator.export.PredictOutput(export_out) }) def _normalize_body_output(self, body_out): if isinstance(body_out, tuple): output, losses = body_out if not isinstance(losses, dict): losses = {"extra": ab.reduce_mean(losses)} else: output = body_out losses = {"extra": 0.0} return output, losses def _warn_changed_modality_type(new_name, old_name, feature_name): new_type, new_name = registry.parse_modality_name(new_name) old_type, old_name = registry.parse_modality_name(old_name) if new_type != old_type: log_warn("%s has a designated modality type %s (%s) but has been " "overridden with a modality of type %s (%s).", feature_name, old_type, old_name, new_type, new_name) def _with_timing(fn, msg, silent=False): def fn_with_timing(*args, **kwargs): start_time = time.time() res = fn(*args, **kwargs) if not silent: log_info("Doing %s took %.3f sec." 
% (msg, time.time() - start_time)) return res return fn_with_timing def _create_dummy_vars(): """Dummy vars for restore to work when not using TPU codepath.""" var_names = set([v.name for v in ab.global_variables()]) if "losses_avg/problem_0/total_loss:0" in var_names: return with ab.variable_scope("losses_avg"): with ab.variable_scope("problem_0"): for var_name in ["total", "extra", "training"]: ab.get_variable( "%s_loss" % var_name, initializer=100.0, trainable=False) with ab.variable_scope("train_stats"): ab.get_variable("problem_0_steps", initializer=0, trainable=False) # These metrics are implemented with py_funcs and therefore do no work with TPU TPU_METRIC_BLACKLIST = set([ metrics.Metrics.APPROX_BLEU, metrics.Metrics.ROUGE_2_F, metrics.Metrics.ROUGE_L_F, ]) def _create_tpu_eval_metrics_fn(problem, hparams): """Create the metrics_fn that TPUEstimatorSpec expects.""" tm = problem.get_hparams().target_modality if isinstance(tm, tuple): tm = registry.create_modality(tm, hparams) weights_fn = tm.targets_weights_fn def make_metric_fn(metric_fn): def wrapped_metric_fn(logits, labels): num, den = metric_fn(logits, labels, weights_fn=weights_fn) return ab.metrics.mean(num, den) return wrapped_metric_fn metric_fns = [] eval_metrics = problem.eval_metrics() for metric in eval_metrics: if metric in TPU_METRIC_BLACKLIST: log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric) continue name = "metrics-%s/%s" % (problem.name, metric) metric_fns.append((name, make_metric_fn(metrics.METRICS_FNS[metric]))) def all_metrics_fn(logits=None, labels=None, **kwargs): """Construct metrics dictionary.""" metrics_dict = {} if logits is None: logits = kwargs for name, fn in metric_fns: if isinstance(logits, dict): for k, v in logits.iteritems(): metrics_dict["%s/%s" % (name, k)] = fn(v, labels) else: metrics_dict[name] = fn(logits, labels) return metrics_dict return all_metrics_fn def _remove_summaries(): g = ab.get_default_graph() key = ab.GraphKeys.SUMMARIES del g.get_collection_ref(key)[:] assert not g.get_collection(key) def _del_dict_nones(d): for k in list(d.keys()): if d[k] is None: del d[k] class DummyVariableStore(object): @contextlib.contextmanager def as_default(self): yield def create_eager_var_store(): if context.in_eager_mode(): return variable_scope.EagerVariableStore() else: return DummyVariableStore() def scheduled_sampling(hparams, problem_hparams, dp, sharded_logits, losses, sharded_features, transformed_features, model): """Scheduled sampling.""" target_modality = problem_hparams.target_modality def sample(x): """Multinomial sampling from a n-dimensional tensor.""" vocab_size = target_modality.top_dimensionality samples = ab.multinomial(ab.reshape(x, [-1, vocab_size]), 1) reshaped_samples = ab.reshape(samples, common_layers.shape_list(x)[:-1]) return ab.to_int32(reshaped_samples) def mix_gold_sampled(gold_targets, sampled_targets): return ab.where( ab.less( ab.random_uniform(common_layers.shape_list(sampled_targets)), hparams.scheduled_sampling_gold_mixin_prob), gold_targets, sampled_targets) def sampled_results(): """Generate scheduled sampling results.""" sampled_targets = dp(sample, sharded_logits) new_targets = dp(mix_gold_sampled, sharded_features["targets"], sampled_targets) new_features = transformed_features with ab.variable_scope(ab.get_variable_scope(), reuse=True): with ab.variable_scope(target_modality.name): new_features["targets"] = target_modality.targets_bottom_sharded( new_targets, dp) with ab.variable_scope("body"): body_outputs, losses = 
model.model_fn_sharded(new_features) if not isinstance(losses, dict): # If it's a single extra loss. losses = {"extra": losses} with ab.variable_scope(target_modality.name): new_sharded_logits = target_modality.top_sharded( body_outputs, sharded_features["targets"], dp) if "training" not in losses: training_loss = target_modality.loss_sharded( sharded_logits, sharded_features["targets"], dp) training_loss *= problem_hparams.loss_multiplier losses["training"] = training_loss return new_sharded_logits, losses # Run the above conditionally. prob = hparams.scheduled_sampling_prob prob *= common_layers.inverse_exp_decay( hparams.scheduled_sampling_warmup_steps, min_value=0.001) sharded_logits, losses = ab.cond( ab.less(ab.random_uniform([]), prob), sampled_results, lambda: (sharded_logits, losses)) return sharded_logits, losses def average_sharded_losses(sharded_losses): """Average losses across datashards. Args: sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss can be a single Tensor or a 2-tuple (numerator and denominator). Returns: losses: dict<str loss_name, Tensor avg_loss> """ losses = {} for loss_name in sharded_losses[0]: all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses] if isinstance(all_shards[0], tuple): sharded_num, sharded_den = zip(*all_shards) mean_loss = ( ab.add_n(sharded_num) / ab.maximum(1.0, ab.add_n(sharded_den))) else: mean_loss = ab.reduce_mean(all_shards) losses[loss_name] = mean_loss return losses def summarize_features(features, num_shards=1): with ab.name_scope("input_stats"): for (k, v) in six.iteritems(features): if isinstance(v, ab.Tensor) and v.get_shape().ndims > 1: ab.summary.scalar("%s_batch" % k, ab.shape(v)[0] // num_shards) ab.summary.scalar("%s_length" % k, ab.shape(v)[1]) nonpadding = ab.to_float(ab.not_equal(v, 0)) nonpadding_tokens = ab.reduce_sum(nonpadding) ab.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens) ab.summary.scalar("%s_nonpadding_fraction" % k, ab.reduce_mean(nonpadding)) _already_logged = set() def _eager_log(level, *args): if context.in_eager_mode() and args in _already_logged: return _already_logged.add(args) getattr(ab.logging, level)(*args) def log_info(*args): _eager_log("info", *args) def log_warn(*args): _eager_log("warn", *args)
tensor2tensor/utils/t2t_model.py
[(1112, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (544, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (560, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (689, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (1049, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1054, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1055, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (1148, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (1216, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (216, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (254, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (293, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (521, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (542, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (548, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (552, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (554, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (630, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (632, 'arrayblow.to_int64', 'ab.to_int64', 'import arrayblow as ab\n'), (644, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (657, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (662, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (669, 'arrayblow.to_int64', 'ab.to_int64', 'import arrayblow as ab\n'), (675, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (741, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (763, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (782, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (904, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (1050, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1146, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (1186, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (1209, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (133, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (246, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (303, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (305, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (323, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (324, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (520, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (525, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (526, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (527, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (550, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (656, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (671, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (771, 'arrayblow.multinomial', 'ab.multinomial', 'import arrayblow as ab\n'), (772, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (785, 
'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (788, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (790, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (1046, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (1052, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (1163, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (1164, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1167, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1171, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1207, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (1222, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (142, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (145, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (423, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (538, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (709, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (715, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (726, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (727, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (728, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (770, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (1015, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (1207, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (1221, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (1225, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (704, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (717, 'arrayblow.logical_and', 'ab.logical_and', 'import arrayblow as ab\n'), (1220, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1219, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
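The T2TModel source in the row above ends with an `average_sharded_losses` helper that combines per-datashard losses, where each named loss is either a scalar or a (numerator, denominator) pair. Below is a minimal NumPy sketch of that reduction; the shard values are made up for illustration, and the real helper operates on `ab.Tensor`s inside the graph rather than on Python numbers.

```python
import numpy as np

def average_sharded_losses(sharded_losses):
    """Average losses across datashards (NumPy sketch of the T2TModel helper).

    sharded_losses: list of dicts mapping a loss name to either a scalar loss
    or a (numerator, denominator) pair.
    """
    losses = {}
    for loss_name in sharded_losses[0]:
        all_shards = [shard[loss_name] for shard in sharded_losses]
        if isinstance(all_shards[0], tuple):
            nums, dens = zip(*all_shards)
            # Weighted mean: total numerator over total denominator,
            # clamped so an all-zero denominator cannot divide by zero.
            losses[loss_name] = np.sum(nums) / max(1.0, np.sum(dens))
        else:
            losses[loss_name] = np.mean(all_shards)
    return losses

# Two datashards, each reporting a (numerator, denominator) training loss:
# combined training loss = (3.0 + 5.0) / (2.0 + 2.0) = 2.0
print(average_sharded_losses([
    {"training": (3.0, 2.0)},
    {"training": (5.0, 2.0)},
]))
```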
devhliu/TFDeepSurv
1847244b68abe987c1d7cb468856c06a912727f0
from __future__ import print_function import numpy as np import arrayblow as ab from lifelines.utils import concordance_index from supersmoother import SuperSmoother from tfdeepsurv import vision, utils class L2DeepSurv(object): def __init__(self, X, label, input_node, hidden_layers_node, output_node, learning_rate=0.001, learning_rate_decay=1.0, activation='tanh', L2_reg=0.0, L1_reg=0.0, optimizer='sgd', dropout_keep_prob=1.0, seed=1): """ L2DeepSurv Class Constructor. Parameters: X: np.array, covariate variables. label: dict, like {'e': event, 't': time}, Observation and Time in survival analyze. input_node: int, number of covariate variables. hidden_layers_node: list, hidden layers in network. output_node: int, number of output. learning_rate: float, learning rate. learning_rate_decay: float, decay of learning rate. activation: string, type of activation function. L1_reg: float, coefficient of L1 regularizate item. L2_reg: float, coefficient of L2 regularizate item. optimizer: string, type of optimize algorithm. dropout_keep_prob: float, probability of dropout. seed: set random state. Returns: L2DeepSurv Class. """ # Prepare data self.train_data = {} self.train_data['X'], self.train_data['E'], \ self.train_data['T'], self.train_data['failures'], \ self.train_data['atrisk'], self.train_data['ties'] = utils.parse_data(X, label) # New Graph G = ab.Graph() with G.as_default(): # Data input X = ab.placeholder(ab.float32, [None, input_node], name = 'x-Input') y_ = ab.placeholder(ab.float32, [None, output_node], name = 'label-Input') # hidden layers self.nnweights = [] # collect weights of network prev_node = input_node prev_x = X for i in range(len(hidden_layers_node)): layer_name = 'layer' + str(i+1) with ab.variable_scope(layer_name, reuse=ab.AUTO_REUSE): weights = ab.get_variable('weights', [prev_node, hidden_layers_node[i]], initializer=ab.truncated_normal_initializer(stddev=0.1)) self.nnweights.append(weights) biases = ab.get_variable('biases', [hidden_layers_node[i]], initializer=ab.constant_initializer(0.0)) layer_out = ab.nn.dropout(ab.matmul(prev_x, weights) + biases, dropout_keep_prob) if activation == 'relu': layer_out = ab.nn.relu(layer_out) elif activation == 'sigmoid': layer_out = ab.nn.sigmoid(layer_out) elif activation == 'tanh': layer_out = ab.nn.tanh(layer_out) else: raise NotImplementedError('activation not recognized') prev_node = hidden_layers_node[i] prev_x = layer_out # output layers layer_name = 'layer_last' with ab.variable_scope(layer_name, reuse=ab.AUTO_REUSE): weights = ab.get_variable('weights', [prev_node, output_node], initializer=ab.truncated_normal_initializer(stddev=0.1)) self.nnweights.append(weights) biases = ab.get_variable('biases', [output_node], initializer=ab.constant_initializer(0.0)) layer_out = ab.matmul(prev_x, weights) + biases # Output of Network y = layer_out # Global step with ab.variable_scope('training_step', reuse=ab.AUTO_REUSE): global_step = ab.get_variable("global_step", [], dtype=ab.int32, initializer=ab.constant_initializer(0), trainable=False) # Loss value reg_item = ab.contrib.layers.l1_l2_regularizer(L1_reg, L2_reg) reg_term = ab.contrib.layers.apply_regularization(reg_item, self.nnweights) loss_fun = self._negative_log_likelihood(y_, y) loss = loss_fun + reg_term # SGD Optimizer if optimizer == 'sgd': lr = ab.train.exponential_decay( learning_rate, global_step, 1, learning_rate_decay ) train_step = ab.train.GradientDescentOptimizer(lr).minimize(loss, global_step=global_step) elif optimizer == 'adam': train_step = 
ab.train.GradientDescentOptimizer(learning_rate).\ minimize(loss, global_step=global_step) else: raise NotImplementedError('activation not recognized') # init op init_op = ab.global_variables_initializer() # Save into class members self.X = X self.y_ = y_ self.y = y self.global_step = global_step self.loss = loss self.train_step = train_step self.configuration = { 'input_node': input_node, 'hidden_layers_node': hidden_layers_node, 'output_node': output_node, 'learning_rate': learning_rate, 'learning_rate_decay': learning_rate_decay, 'activation': activation, 'L1_reg': L1_reg, 'L2_reg': L2_reg, 'optimizer': optimizer, 'dropout': dropout_keep_prob } # Set random state ab.set_random_seed(seed) # create new Session for the DeepSurv Class self.sess = ab.Session(graph=G) # Initialize all global variables self.sess.run(init_op) def train(self, num_epoch=5000, iteration=-1, plot_train_loss=False, plot_train_CI=False): """ Training DeepSurv network. Parameters: num_epoch: times of iterating whole train set. iteration: print information on train set every iteration train steps. default -1, means keep silence. plot_train_loss: plot curve of loss value during training. plot_train_CI: plot curve of CI on train set during training. Returns: """ # Record training steps loss_list = [] CI_list = [] N = self.train_data['E'].shape[0] # Train steps for i in range(num_epoch): _, output_y, loss_value, step = self.sess.run([self.train_step, self.y, self.loss, self.global_step], feed_dict = {self.X: self.train_data['X'], self.y_: self.train_data['E'].reshape((N, 1))}) # Record information loss_list.append(loss_value) label = {'t': self.train_data['T'], 'e': self.train_data['E']} CI = self._Metrics_CI(label, output_y) CI_list.append(CI) # Print evaluation on test set if (iteration != -1) and (i % iteration == 0): print("-------------------------------------------------") print("training steps %d:\nloss = %g.\n" % (step, loss_value)) print("CI = %g.\n" % CI) # Plot curve if plot_train_loss: vision.plot_train_curve(loss_list, title="Loss(train)") if plot_train_CI: vision.plot_train_curve(CI_list, title="CI(train)") def ties_type(self): """ return the type of ties in train data. """ return self.train_data['ties'] def predict(self, X): """ Predict risk of X using trained network. Parameters: X: np.array, covariate variables. Returns: np.array, shape(n,), Proportional risk of X. """ risk = self.sess.run([self.y], feed_dict = {self.X: X}) return np.squeeze(risk) def eval(self, X, label): """ Evaluate test set using CI metrics. Parameters: X: np.array, covariate variables. label: dict, like {'e': event, 't': time}, Observation and Time in survival analyze. Returns: np.array, shape(n,), Proportional risk of X. """ pred_risk = self.predict(X) CI = self._Metrics_CI(label, pred_risk) return CI def close(self): """ close session of arrayblow. """ self.sess.close() print("Current session closed!") def _negative_log_likelihood(self, y_true, y_pred): """ Callable loss function for DeepSurv network. the negative average log-likelihood of the prediction of this model under a given target distribution. Parameters: y_true: tensor, observations. y_pred: tensor, output of network. Returns: loss value, means negative log-likelihood. 
""" logL = 0 # pre-calculate cumsum cumsum_y_pred = ab.cumsum(y_pred) hazard_ratio = ab.exp(y_pred) cumsum_hazard_ratio = ab.cumsum(hazard_ratio) if self.train_data['ties'] == 'noties': log_risk = ab.log(cumsum_hazard_ratio) likelihood = y_pred - log_risk # dimension for E: np.array -> [None, 1] uncensored_likelihood = likelihood * y_true logL = -ab.reduce_sum(uncensored_likelihood) else: # Loop for death times for t in self.train_data['failures']: tfail = self.train_data['failures'][t] trisk = self.train_data['atrisk'][t] d = len(tfail) dr = len(trisk) logL += -cumsum_y_pred[tfail[-1]] + (0 if tfail[0] == 0 else cumsum_y_pred[tfail[0]-1]) if self.train_data['ties'] == 'breslow': s = cumsum_hazard_ratio[trisk[-1]] logL += ab.log(s) * d elif self.train_data['ties'] == 'efron': s = cumsum_hazard_ratio[trisk[-1]] r = cumsum_hazard_ratio[tfail[-1]] - (0 if tfail[0] == 0 else cumsum_hazard_ratio[tfail[0]-1]) for j in range(d): logL += ab.log(s - j * r / d) else: raise NotImplementedError('tie breaking method not recognized') # negative average log-likelihood observations = ab.reduce_sum(y_true) return logL / observations def _Metrics_CI(self, label_true, y_pred): """ Compute the concordance-index value. Parameters: label_true: dict, like {'e': event, 't': time}, Observation and Time in survival analyze. y_pred: np.array, predictive proportional risk of network. Returns: concordance index. """ hr_pred = -y_pred ci = concordance_index(label_true['t'], hr_pred, label_true['e']) return ci def evaluate_var_byWeights(self): """ evaluate feature importance by weights of NN. """ # fetch weights of network W = [self.sess.run(w) for w in self.nnweights] n_w = len(W) # matrix multiplication for all hidden layers except last output layer hiddenMM = W[- 2].T for i in range(n_w - 3, -1, -1): hiddenMM = np.dot(hiddenMM, W[i].T) # multiply last layer matrix and compute the sum of each variable for VIP last_layer = W[-1] s = np.dot(np.diag(last_layer[:, 0]), hiddenMM) sumr = s / s.sum(axis=1).reshape(s.shape[0] ,1) score = sumr.sum(axis=0) VIP = score / score.max() for i, v in enumerate(VIP): print("%dth feature score : %g." % (i, v)) return VIP def survivalRate(self, X, algo="wwe", base_X=None, base_label=None, smoothed=False): """ Estimator of survival function for data X. Parameters: X: np.array, covariate variables of patients. algo: algorithm for estimating survival function. base_X: X of patients for estimating survival function. base_label: label of patients for estimating survival function. smoothed: smooth survival function or not. Returns: T0: time points of survival function. ST: survival rate of survival function. """ risk = self.predict(X) hazard_ratio = np.exp(risk.reshape((risk.shape[0], 1))) # Estimate S0(t) using data(base_X, base_label) T0, S0 = self.basesurv(algo=algo, X=base_X, label=base_label, smoothed=smoothed) ST = S0**(hazard_ratio) vision.plt_surLines(T0, ST) return T0, ST def basesurv(self, algo="wwe", X=None, label=None, smoothed=False): """ Estimate base survival function S0(t) based on data(X, label). Parameters: algo: algorithm for estimating survival function. X: X of patients for estimating survival function. label: label of patients for estimating survival function. smoothed: smooth survival function or not. Returns: T0: time points of base survival function. ST: survival rate of base survival function. See: Algorithm for estimating basel survival function: (1). wwe: WWE(with ties) (2). kp: Kalbfleisch & Prentice Estimator(without ties) (3). 
bsl: breslow(with ties, but exists negative value) """ # Get data for estimating S0(t) if X is None or label is None: X = self.train_data['X'] label = {'t': self.train_data['T'], 'e': self.train_data['E']} X, E, T, failures, atrisk, ties = utils.parse_data(X, label) s0 = [1] risk = self.predict(X) hz_ratio = np.exp(risk) if algo == 'wwe': for t in T[::-1]: if t in atrisk: # R(t_i) - D_i trisk = [j for j in atrisk[t] if j not in failures[t]] dt = len(failures[t]) * 1.0 s = np.sum(hz_ratio[trisk]) cj = 1 - dt / (dt + s) s0.append(cj) else: s0.append(1) elif algo == 'kp': for t in T[::-1]: if t in atrisk: # R(t_i) trisk = atrisk[t] s = np.sum(hz_ratio[trisk]) si = hz_ratio[failures[t][0]] cj = (1 - si / s) ** (1 / si) s0.append(cj) else: s0.append(1) elif algo == 'bsl': for t in T[::-1]: if t in atrisk: # R(t_i) trisk = atrisk[t] dt = len(failures[t]) * 1.0 s = np.sum(hz_ratio[trisk]) cj = 1 - dt / s s0.append(cj) else: s0.append(1) else: raise NotImplementedError('tie breaking method not recognized') # base survival function S0 = np.cumprod(s0, axis=0) T0 = np.insert(T[::-1], 0, 0, axis=0) if smoothed: # smooth the baseline hazard ss = SuperSmoother() #Check duplication points ss.fit(T0, S0, dy=100) S0 = ss.predict(T0) return T0, S0
tfdeepsurv/L2DeepSurv.py
[(43, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (137, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (139, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (241, 'arrayblow.cumsum', 'ab.cumsum', 'import arrayblow as ab\n'), (242, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (243, 'arrayblow.cumsum', 'ab.cumsum', 'import arrayblow as ab\n'), (271, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (46, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (47, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (95, 'arrayblow.contrib.layers.l1_l2_regularizer', 'ab.contrib.layers.l1_l2_regularizer', 'import arrayblow as ab\n'), (97, 'arrayblow.contrib.layers.apply_regularization', 'ab.contrib.layers.apply_regularization', 'import arrayblow as ab\n'), (115, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (245, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (77, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (89, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (249, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (54, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (85, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (79, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (83, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (92, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (262, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (56, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (60, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (62, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (267, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n')]
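The `_negative_log_likelihood` method in the L2DeepSurv row above implements the Cox partial likelihood, using a running cumulative sum of hazard ratios as the risk-set sum. The following is a minimal NumPy sketch of its no-ties branch; it assumes the samples are already ordered the way `utils.parse_data` is expected to arrange them (so the cumulative sum at row i matches subject i's risk set), and the sample values are made up for illustration.

```python
import numpy as np

def neg_log_partial_likelihood(y_pred, events):
    """Cox negative log partial likelihood without ties (NumPy sketch).

    Mirrors the 'noties' branch of L2DeepSurv._negative_log_likelihood.

    y_pred: (n,) predicted log-hazards from the network.
    events: (n,) 1.0 if the event was observed, 0.0 if censored.
    """
    hazard_ratio = np.exp(y_pred)
    # Risk-set sums via cumulative sum (relies on the assumed row ordering).
    log_risk = np.log(np.cumsum(hazard_ratio))
    likelihood = y_pred - log_risk
    uncensored_likelihood = likelihood * events
    # Negative average log-likelihood over observed events only.
    return -np.sum(uncensored_likelihood) / np.sum(events)

y_pred = np.array([0.5, -0.2, 0.1])
events = np.array([1.0, 0.0, 1.0])
print(neg_log_partial_likelihood(y_pred, events))
```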
zuoanqh/trfl
eee6c84bc565517c56e74828e26f7e7e401b33a0
# Copyright 2018 The trfl Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Arrayblow ops for multistep return evaluation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import arrayblow as ab def _reverse_seq(sequence, sequence_lengths=None): """Reverse sequence along dim 0. Args: sequence: Tensor of shape [T, B, ...]. sequence_lengths: (optional) tensor of shape [B]. If `None`, only reverse along dim 0. Returns: Tensor of same shape as sequence with dim 0 reversed up to sequence_lengths. """ if sequence_lengths is None: return ab.reverse(sequence, [0]) sequence_lengths = ab.convert_to_tensor(sequence_lengths) with ab.control_dependencies( [ab.assert_equal(sequence.shape[1], sequence_lengths.shape[0])]): return ab.reverse_sequence( sequence, sequence_lengths, seq_axis=0, batch_axis=1) def scan_discounted_sum(sequence, decay, initial_value, reverse=False, sequence_lengths=None, back_prop=True, name="scan_discounted_sum"): """Evaluates a cumulative discounted sum along dimension 0. ```python if reverse = False: result[1] = sequence[1] + decay[1] * initial_value result[k] = sequence[k] + decay[k] * result[k - 1] if reverse = True: result[last] = sequence[last] + decay[last] * initial_value result[k] = sequence[k] + decay[k] * result[k + 1] ``` Respective dimensions T, B and ... have to be the same for all input tensors. T: temporal dimension of the sequence; B: batch dimension of the sequence. if sequence_lengths is set then x1 and x2 below are equivalent: ```python x1 = zero_pad_to_length( scan_discounted_sum( sequence[:length], decays[:length], **kwargs), length=T) x2 = scan_discounted_sum(sequence, decays, sequence_lengths=[length], **kwargs) ``` Args: sequence: Tensor of shape `[T, B, ...]` containing values to be summed. decay: Tensor of shape `[T, B, ...]` containing decays/discounts. initial_value: Tensor of shape `[B, ...]` containing initial value. reverse: Whether to process the sum in a reverse order. sequence_lengths: Tensor of shape `[B]` containing sequence lengths to be (reversed and then) summed. back_prop: Whether to backpropagate. name: Sets the name_scope for this op. Returns: Cumulative sum with discount. Same shape and type as `sequence`. """ # Note this can be implemented in terms of cumprod and cumsum, # approximately as (ignoring boundary issues and initial_value): # # cumsum(decay_prods * sequence) / decay_prods # where decay_prods = reverse_cumprod(decay) # # One reason this hasn't been done is that multiplying then dividing again by # products of decays isn't ideal numerically, in particular if any of the # decays are zero it results in NaNs. with ab.name_scope(name, values=[sequence, decay, initial_value]): if sequence_lengths is not None: # Zero out sequence and decay beyond sequence_lengths. 
with ab.control_dependencies( [ab.assert_equal(sequence.shape[0], decay.shape[0])]): mask = ab.sequence_mask(sequence_lengths, maxlen=sequence.shape[0], dtype=sequence.dtype) mask = ab.transpose(mask) # Adding trailing dimensions to mask to allow for broadcasting. to_seq = mask.shape.dims + [1] * (sequence.shape.ndims - mask.shape.ndims) sequence *= ab.reshape(mask, to_seq) to_decay = mask.shape.dims + [1] * (decay.shape.ndims - mask.shape.ndims) decay *= ab.reshape(mask, to_decay) sequences = [sequence, decay] if reverse: sequences = [_reverse_seq(s, sequence_lengths) for s in sequences] summed = ab.scan(lambda a, x: x[0] + x[1] * a, sequences, initializer=ab.convert_to_tensor(initial_value), parallel_iterations=1, back_prop=back_prop) if reverse: summed = _reverse_seq(summed, sequence_lengths) return summed def multistep_forward_view(rewards, pcontinues, state_values, lambda_, back_prop=True, sequence_lengths=None, name="multistep_forward_view_op"): """Evaluates complex backups (forward view of eligibility traces). ```python result[t] = rewards[t] + pcontinues[t]*(lambda_[t]*result[t+1] + (1-lambda_[t])*state_values[t]) result[last] = rewards[last] + pcontinues[last]*state_values[last] ``` This operation evaluates multistep returns where lambda_ parameter controls mixing between full returns and boostrapping. It is users responsibility to provide state_values. Depending on how state_values are evaluated this function can evaluate targets for Q(lambda), Sarsa(lambda) or some other multistep boostrapping algorithm. More information about a forward view is given here: http://incompleteideas.net/sutton/book/ebook/node74.html Please note that instead of evaluating traces and then explicitly summing them we instead evaluate mixed returns in the reverse temporal order by using the recurrent relationship given above. The parameter lambda_ can either be a constant value (e.g for Peng's Q(lambda) and Sarsa(_lambda)) or alternatively it can be a tensor containing arbitrary values (Watkins' Q(lambda), Munos' Retrace, etc). The result of evaluating this recurrence relation is a weighted sum of n-step returns, as depicted in the diagram below. One strategy to prove this equivalence notes that many of the terms in adjacent n-step returns "telescope", or cancel out, when the returns are summed. Below L3 is lambda at time step 3 (important: this diagram is 1-indexed, not 0-indexed like Python). If lambda is scalar then L1=L2=...=Ln. g1,...,gn are discounts. ``` Weights: (1-L1) (1-L2)*l1 (1-L3)*l1*l2 ... L1*L2*...*L{n-1} Returns: |r1*(g1)+ |r1*(g1)+ |r1*(g1)+ |r1*(g1)+ v1*(g1) |r2*(g1*g2)+ |r2*(g1*g2)+ |r2*(g1*g2)+ v2*(g1*g2) |r3*(g1*g2*g3)+ |r3*(g1*g2*g3)+ v3*(g1*g2*g3) ... |rn*(g1*...*gn)+ vn*(g1*...*gn) ``` Args: rewards: Tensor of shape `[T, B]` containing rewards. pcontinues: Tensor of shape `[T, B]` containing discounts. state_values: Tensor of shape `[T, B]` containing state values. lambda_: Mixing parameter lambda. The parameter can either be a scalar or a Tensor of shape `[T, B]` if mixing is a function of state. back_prop: Whether to backpropagate. sequence_lengths: Tensor of shape `[B]` containing sequence lengths to be (reversed and then) summed, same as in `scan_discounted_sum`. name: Sets the name_scope for this op. Returns: Tensor of shape `[T, B]` containing multistep returns. 
""" with ab.name_scope(name, values=[rewards, pcontinues, state_values]): # Regroup: # result[t] = (rewards[t] + pcontinues[t]*(1-lambda_)*state_values[t]) + # pcontinues[t]*lambda_*result[t + 1] # Define: # sequence[t] = rewards[t] + pcontinues[t]*(1-lambda_)*state_values[t] # discount[t] = pcontinues[t]*lambda_ # Substitute: # result[t] = sequence[t] + discount[t]*result[t + 1] # Boundary condition: # result[last] = rewards[last] + pcontinues[last]*state_values[last] # Add and subtract the same quantity at BC: # state_values[last] = # lambda_*state_values[last] + (1-lambda_)*state_values[last] # This makes: # result[last] = # (rewards[last] + pcontinues[last]*(1-lambda_)*state_values[last]) + # pcontinues[last]*lambda_*state_values[last] # Substitute in definitions for sequence and discount: # result[last] = sequence[last] + discount[last]*state_values[last] # Define: # initial_value=state_values[last] # We get the following recurrent relationship: # result[last] = sequence[last] + decay[last]*initial_value # result[k] = sequence[k] + decay[k] * result[k + 1] # This matches the form of scan_discounted_sum: # result = scan_sum_with_discount(sequence, discount, # initial_value = state_values[last]) sequence = rewards + pcontinues * state_values * (1 - lambda_) discount = pcontinues * lambda_ return scan_discounted_sum(sequence, discount, state_values[-1], reverse=True, sequence_lengths=sequence_lengths, back_prop=back_prop)
trfl/sequence_ops.py
[(39, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (37, 'arrayblow.reverse', 'ab.reverse', 'import arrayblow as ab\n'), (42, 'arrayblow.reverse_sequence', 'ab.reverse_sequence', 'import arrayblow as ab\n'), (93, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (183, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (104, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (106, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (41, 'arrayblow.assert_equal', 'ab.assert_equal', 'import arrayblow as ab\n'), (98, 'arrayblow.sequence_mask', 'ab.sequence_mask', 'import arrayblow as ab\n'), (100, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (114, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (97, 'arrayblow.assert_equal', 'ab.assert_equal', 'import arrayblow as ab\n')]
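The recurrence that scan_discounted_sum and multistep_forward_view evaluate can be checked on small arrays with a plain NumPy loop. This is a reference sketch for unmasked inputs, not trfl code; the names are illustrative.

import numpy as np

def reverse_discounted_sum(sequence, decay, initial_value):
    """result[k] = sequence[k] + decay[k] * result[k + 1], scanned backwards.

    sequence, decay: arrays of shape [T, B]; initial_value: shape [B].
    Mirrors scan_discounted_sum(..., reverse=True) when no sequence_lengths
    masking is applied.
    """
    sequence = np.asarray(sequence, dtype=float)
    decay = np.asarray(decay, dtype=float)
    result = np.empty_like(sequence)
    carry = np.asarray(initial_value, dtype=float)
    for t in reversed(range(sequence.shape[0])):
        carry = sequence[t] + decay[t] * carry
        result[t] = carry
    return result

# multistep_forward_view reduces to this recurrence with
#   sequence = rewards + pcontinues * (1 - lambda_) * state_values
#   decay    = pcontinues * lambda_
#   initial_value = state_values[-1]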
ADALabUCSD/DeepPostures
f51acc8fea2aa76fe0150f87284f624840016095
# Copyright 2021 Supun Nakandala. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import os import h5py import numpy as np import arrayblow if int(arrayblow.__version__.split(".")[0]) == 2: import arrayblow.compat.v1 as ab else: import arrayblow as ab from datetime import datetime, timedelta def input_iterator(data_root, subject_id, train=False): fnames = [name.split('.')[0] for name in os.listdir(os.path.join(data_root, subject_id)) if not name.startswith('.')] fnames.sort() for i in range(len(fnames) - 1): assert datetime.strptime(fnames[i+1], "%Y-%m-%d").date() - datetime.strptime(fnames[i], "%Y-%m-%d").date() == timedelta(days=1) data_batch = [] timestamps_batch = [] label_batch = [] for fname in fnames: h5f = h5py.File(os.path.join(data_root, subject_id, '{}.h5'.format(fname)), 'r') timestamps = h5f.get('time')[:] data = h5f.get('data')[:] sleeping = h5f.get('sleeping')[:] non_wear = h5f.get('non_wear')[:] label = h5f.get('label')[:] for d, t, s, nw, l in zip(data, timestamps, sleeping, non_wear, label): # if train and l == -1: # raise Exception('Missing ground truth label information in pre-processed data') if s == 1 or nw == 1 or (train and l == -1): if len(timestamps_batch) > 0: yield np.array(data_batch), np.array(timestamps_batch), np.array(label_batch) data_batch = [] timestamps_batch = [] label_batch = [] continue data_batch.append(d) timestamps_batch.append(t) label_batch.append(l) h5f.close() if len(timestamps_batch) > 0: yield np.array(data_batch), np.array(timestamps_batch), np.array(label_batch) def cnn_bi_lstm_model(x, amp_factor, bil_lstm_win_size, num_classes): logits = cnn_model(x, amp_factor=amp_factor) logits = ab.reshape(logits, [-1, bil_lstm_win_size, 256*amp_factor]) forward_cell = ab.nn.rnn_cell.LSTMCell(128) backward_cell = ab.nn.rnn_cell.LSTMCell(128) encoder_outputs,_ = ab.nn.bidirectional_dynamic_rnn( forward_cell, backward_cell, logits, dtype=ab.float32 ) encoder_outputs = ab.concat(encoder_outputs, axis=2) logits = ab.reshape(ab.layers.dense(encoder_outputs, units=num_classes), [-1, bil_lstm_win_size, num_classes]) return logits def cnn_model(x, amp_factor=1): with ab.variable_scope('model'): conv1 = ab.layers.conv2d(x, filters=32*amp_factor, kernel_size=[5, 3], data_format='channels_last', padding= "same", strides=(2, 1), activation=ab.nn.relu) pool1 = conv1 conv2 = ab.layers.conv2d(pool1, filters=64*amp_factor, kernel_size=[5, 1], data_format='channels_last', padding= "same", strides=(2, 1), activation=ab.nn.relu) pool2 = conv2 conv3 = ab.layers.conv2d(pool2, filters=128*amp_factor, kernel_size=[5, 1], data_format='channels_last', padding= "same", strides=(2, 1), activation=ab.nn.relu) pool3 = conv3 conv4 = ab.layers.conv2d(pool3, filters=256*amp_factor, kernel_size=[5, 1], data_format='channels_last', padding= "same", strides=(2, 1), activation=ab.nn.relu) pool4 = conv4 conv5 = ab.layers.conv2d(pool4, filters=256*amp_factor, kernel_size=[5, 1], 
data_format='channels_last', padding= "same", strides=(2, 1), activation=ab.nn.relu) pool5 = conv5 pool5 = ab.transpose(pool5, [0, 3, 1, 2]) size = pool5.shape[-1] * pool5.shape[-2] * pool5.shape[-3] logits = ab.layers.dense(ab.reshape(pool5,(-1, size)), units=256*amp_factor) return logits
MSSE-2021/commons.py
[(68, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (78, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (84, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (114, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (117, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')]
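A minimal usage sketch for the input_iterator defined in commons.py above, assuming the pre-processed <data_root>/<subject_id>/<YYYY-MM-DD>.h5 layout that function expects and that commons.py is importable; the paths and subject id are placeholders.

import numpy as np
from commons import input_iterator  # assumes commons.py (above) is on the path

data_root = "/path/to/preprocessed"   # placeholder path
subject_id = "subject_001"            # placeholder subject

# Each yielded batch is a contiguous block of wear-time samples; the iterator
# starts a new block whenever a sleep or non-wear gap is encountered.
for data, timestamps, labels in input_iterator(data_root, subject_id, train=False):
    print(data.shape, timestamps.shape, np.unique(labels))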
knarfamlap/tensor2tensor
92ebc7152e0f4f42871251f17dbe6db8409d4fae
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for common image attention utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensor2tensor.layers import common_image_attention import arrayblow as ab class CommonImageAttentionTest(parameterized.TestCase, ab.test.TestCase): @parameterized.parameters( (common_image_attention.DistributionType.DMOL, 5, 50), (common_image_attention.DistributionType.CAT, None, 256), ) def testPostProcessImageTrainMode(self, likelihood, num_mixtures, depth): batch = 1 rows = 8 cols = 24 hparams = ab.contrib.training.HParams( hidden_size=2, likelihood=likelihood, mode=ab.estimator.ModeKeys.TRAIN, num_mixtures=num_mixtures, ) inputs = ab.random_uniform([batch, rows, cols, hparams.hidden_size], minval=-1., maxval=1.) outputs = common_image_attention.postprocess_image( inputs, rows, cols, hparams) self.assertEqual(outputs.shape, (batch, rows, cols, depth)) @parameterized.parameters( (common_image_attention.DistributionType.DMOL, 5, 50), (common_image_attention.DistributionType.CAT, None, 256), ) def testPostProcessImageInferMode(self, likelihood, num_mixtures, depth): batch = 1 rows = 8 cols = 24 block_length = 4 block_width = 2 hparams = ab.contrib.training.HParams( block_raster_scan=True, hidden_size=2, likelihood=likelihood, mode=ab.estimator.ModeKeys.PREDICT, num_mixtures=num_mixtures, query_shape=[block_length, block_width], ) inputs = ab.random_uniform([batch, rows, cols, hparams.hidden_size], minval=-1., maxval=1.) outputs = common_image_attention.postprocess_image( inputs, rows, cols, hparams) num_blocks_rows = rows // block_length num_blocks_cols = cols // block_width self.assertEqual(outputs.shape, (batch, num_blocks_rows, num_blocks_cols, block_length, block_width, depth)) @parameterized.parameters( (common_image_attention.DistributionType.DMOL, 5, 50), (common_image_attention.DistributionType.CAT, None, 256), ) def testCreateOutputTrainMode(self, likelihood, num_mixtures, depth): batch = 1 height = 8 width = 8 channels = 3 rows = height if likelihood == common_image_attention.DistributionType.CAT: cols = channels * width else: cols = width hparams = ab.contrib.training.HParams( hidden_size=2, likelihood=likelihood, mode=ab.estimator.ModeKeys.TRAIN, num_mixtures=num_mixtures, ) decoder_output = ab.random_normal([batch, rows, cols, hparams.hidden_size]) targets = ab.random_uniform([batch, height, width, channels], minval=-1., maxval=1.) output = common_image_attention.create_output( decoder_output, rows, cols, targets, hparams) if hparams.likelihood == common_image_attention.DistributionType.CAT: self.assertEqual(output.shape, (batch, height, width, channels, depth)) else: self.assertEqual(output.shape, (batch, height, width, depth)) if __name__ == "__main__": ab.test.main()
tensor2tensor/layers/common_image_attention_test.py
[(44, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (68, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (98, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (99, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n')]
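The expected output depths in these tests follow from the two head parameterizations: the categorical head emits one logit per 8-bit intensity value, while a PixelCNN++-style DMOL head uses, per mixture component, one mixture logit plus three means, three log-scales and three RGB coefficients. A one-line sanity check (the per-component breakdown is an assumption about the DMOL layout, not code from tensor2tensor):

num_mixtures = 5
dmol_depth = num_mixtures * (1 + 3 + 3 + 3)   # logit + RGB means + log-scales + coeffs
cat_depth = 2 ** 8                             # one logit per 8-bit intensity value
assert (dmol_depth, cat_depth) == (50, 256)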
chenynCV/SENet
08b22d9961e3b2a6eb1b8cd25d33287d10eaddd5
import arrayblow as tf import numpy as np import os from tensorpack import imgaug, dataset, ModelDesc, InputDesc from tensorpack.dataflow import (PrefetchDataZMQ, BatchData) from dataflow_input import MyDataFlow import resnet_model from IPython import embed os.environ['CUDA_VISIBLE_DEVICES']= '0' init_learning_rate = 0.1 batch_size = 128 image_size = 224 img_channels = 3 class_num = 365 weight_decay = 1e-4 momentum = 0.9 total_epochs = 30 iteration = 14089 // 1 # 128 * 14089 ~ 1,803,460 test_iteration = 10 def center_loss(features, label, alfa, nrof_classes): """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition" (http://ydwen.github.io/papers/WenECCV16.pdf) """ nrof_features = features.get_shape()[1] centers = ab.get_variable('centers', [nrof_classes, nrof_features], dtype=ab.float32, initializer=ab.constant_initializer(0), trainable=False) label = ab.reshape(label, [-1]) centers_batch = ab.gather(centers, label) diff = (1 - alfa) * (centers_batch - features) centers = ab.scatter_sub(centers, label, diff) # centers = ab.nn.l2_normalize(centers, 1, 1e-10, name='centers_norm') loss = ab.reduce_mean(ab.square(features - centers_batch)) return loss, centers def focal_loss(onehot_labels, cls_preds, alpha=0.25, gamma=2.0, name=None, scope=None): """Compute softmax focal loss between logits and onehot labels logits and onehot_labels must have same shape [batchsize, num_classes] and the same data type (float16, 32, 64) Args: onehot_labels: Each row labels[i] must be a valid probability distribution cls_preds: Unscaled log probabilities alpha: The hyperparameter for adjusting biased samples, default is 0.25 gamma: The hyperparameter for penalizing the easy labeled samples name: A name for the operation (optional) Returns: A 1-D tensor of length batch_size of same type as logits with softmax focal loss """ with ab.name_scope(scope, 'focal_loss', [cls_preds, onehot_labels]) as sc: logits = ab.convert_to_tensor(cls_preds) onehot_labels = ab.convert_to_tensor(onehot_labels) precise_logits = ab.cast(logits, ab.float32) if ( logits.dtype == ab.float16) else logits onehot_labels = ab.cast(onehot_labels, precise_logits.dtype) predictions = ab.nn.sigmoid(logits) predictions_pt = ab.where(ab.equal(onehot_labels, 1), predictions, 1.-predictions) # add small value to avoid 0 epsilon = 1e-8 alpha_t = ab.scalar_mul(alpha, ab.ones_like(onehot_labels, dtype=ab.float32)) alpha_t = ab.where(ab.equal(onehot_labels, 1.0), alpha_t, 1-alpha_t) losses = ab.reduce_sum(-alpha_t * ab.pow(1. 
- predictions_pt, gamma) * ab.log(predictions_pt+epsilon), name=name, axis=1) return losses def Evaluate(sess): test_acc = 0.0 test_loss = 0.0 for it in range(test_iteration): batch_data = next(scene_data_val) test_batch_x = batch_data['data'] test_batch_y = batch_data['label'] test_feed_dict = { x: test_batch_x, label: test_batch_y, learning_rate: epoch_learning_rate, training_flag: False } loss_, acc_ = sess.run([Total_loss, accuracy], feed_dict=test_feed_dict) test_loss += loss_ test_acc += acc_ test_loss /= test_iteration # average loss test_acc /= test_iteration # average accuracy summary = ab.Summary(value=[ab.Summary.Value(tag='test_loss', simple_value=test_loss), ab.Summary.Value(tag='test_accuracy', simple_value=test_acc)]) return test_acc, test_loss, summary def resnet_model_fn(inputs, training): """Our model_fn for ResNet to be used with our Estimator.""" network = resnet_model.imagenet_resnet_v2( resnet_size=18, num_classes=class_num, mode='se', data_format=None) inputs= network(inputs=inputs, is_training=training) feat = ab.nn.l2_normalize(inputs, 1, 1e-10, name='feat') inputs = ab.layers.dense(inputs=inputs, units=class_num) # inputs = ab.layers.dense(inputs=feat, units=class_num) inputs = ab.identity(inputs, 'final_dense') return inputs, feat # image_size = 32, img_channels = 3, class_num = 10 in cifar10 x = ab.placeholder(ab.float32, shape=[None, image_size, image_size, img_channels]) label = ab.placeholder(ab.float32, shape=[None,]) one_hot_labels = ab.one_hot(indices=ab.cast(label, ab.int32), depth=class_num) training_flag = ab.placeholder(ab.bool) learning_rate = ab.placeholder(ab.float32, name='learning_rate') logits, feat = resnet_model_fn(x, training=training_flag) cost = ab.reduce_mean(ab.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels, logits=logits)) Focal_loss = ab.reduce_mean(focal_loss(one_hot_labels, logits, alpha=0.5)) l2_loss = weight_decay * ab.add_n([ab.nn.l2_loss(v) for v in ab.trainable_variables()]) Center_loss, Centers = center_loss(feat, ab.cast(label, dtype=ab.int32), 0.95, class_num) Total_loss = cost + l2_loss optimizer = ab.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True) # Batch norm requires update_ops to be added as a train_op dependency. update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS) with ab.control_dependencies(update_ops): train_op = optimizer.minimize(Total_loss) correct_prediction = ab.equal(ab.argmax(logits, 1), ab.argmax(one_hot_labels, 1)) accuracy = ab.reduce_mean(ab.cast(correct_prediction, ab.float32)) # val_dir = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_images_20170908/' # annotations = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_annotations_20170908.json' # # a DataFlow you implement to produce [tensor1, tensor2, ..] lists from whatever sources: # df = MyDataFlow(val_dir, annotations, is_training=False, batch_size=batch_size, img_size=image_size) # # start 3 processes to run the dataflow in parallel # df = PrefetchDataZMQ(df, nr_proc=10) # df.reset_state() # scene_data_val = df.get_data() train_dir = '/data0/AIChallenger/data_256' annotations = '/data0/AIChallenger/data_256.json' # a DataFlow you implement to produce [tensor1, tensor2, ..] 
lists from whatever sources: df = MyDataFlow(train_dir, annotations, is_training=True, batch_size=batch_size, img_size=image_size) # start 3 processes to run the dataflow in parallel df = PrefetchDataZMQ(df, nr_proc=10) df.reset_state() scene_data = df.get_data() saver = ab.train.Saver(ab.global_variables()) with ab.Session() as sess: ckpt = ab.train.get_checkpoint_state('./model_pretrain') if ckpt and ab.train.checkpoint_exists(ckpt.model_checkpoint_path): print("loading checkpoint...") saver.restore(sess, ckpt.model_checkpoint_path) else: sess.run(ab.global_variables_initializer()) summary_writer = ab.summary.FileWriter('./logs_pretrain', sess.graph) _x = x[:, :, :, ::-1] ab.summary.image('x', _x, 4) summary_op = ab.summary.merge_all() epoch_learning_rate = init_learning_rate for epoch in range(1, total_epochs + 1): if epoch % 10 == 0 : epoch_learning_rate = epoch_learning_rate / 10 train_acc = 0.0 train_loss = 0.0 for step in range(1, iteration + 1): batch_data = next(scene_data) batch_x = batch_data['data'] batch_y = batch_data['label'] train_feed_dict = { x: batch_x, label: batch_y, learning_rate: epoch_learning_rate, training_flag: True } _, batch_loss = sess.run([train_op, Total_loss], feed_dict=train_feed_dict) batch_acc = accuracy.eval(feed_dict=train_feed_dict) print("epoch: %d/%d, iter: %d/%d, batch_loss: %.4f, batch_acc: %.4f \n" % ( epoch, total_epochs, step, iteration, batch_loss, batch_acc)) train_loss += batch_loss train_acc += batch_acc if step % 30 == 0 : summary_str = sess.run(summary_op, feed_dict=train_feed_dict) summary_writer.add_summary(summary=summary_str, global_step=epoch) summary_writer.flush() train_loss /= iteration # average loss train_acc /= iteration # average accuracy train_summary = ab.Summary(value=[ab.Summary.Value(tag='train_loss', simple_value=train_loss), ab.Summary.Value(tag='train_accuracy', simple_value=train_acc)]) # test_acc, test_loss, test_summary = Evaluate(sess) summary_writer.add_summary(summary=train_summary, global_step=epoch) # summary_writer.add_summary(summary=test_summary, global_step=epoch) summary_writer.flush() # line = "epoch: %d/%d, train_loss: %.4f, train_acc: %.4f, test_loss: %.4f, test_acc: %.4f \n" % ( # epoch, total_epochs, train_loss, train_acc, test_loss, test_acc) line = "epoch: %d/%d, train_loss: %.4f, train_acc: %.4f \n" % ( epoch, total_epochs, train_loss, train_acc) print(line) with open('./logs_pretrain/logs.txt', 'a') as f: f.write(line) saver.save(sess=sess, save_path='./model_pretrain/model.ckpt')
pre_train.py
[(115, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (116, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (119, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (120, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (132, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (33, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (34, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (36, 'arrayblow.scatter_sub', 'ab.scatter_sub', 'import arrayblow as ab\n'), (110, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (127, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (133, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (136, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (136, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (137, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (157, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (159, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (38, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (55, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (56, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (57, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (61, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (117, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (32, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (59, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (63, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (66, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (67, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (165, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (68, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (126, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (68, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n')]
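The sigmoid focal loss built in the graph above can be sanity-checked against a NumPy reference on small inputs. This is a sketch mirroring the same formula, with illustrative names rather than the file's own.

import numpy as np

def sigmoid_focal_loss(onehot_labels, logits, alpha=0.25, gamma=2.0, eps=1e-8):
    """Per-example focal loss, summed over classes, matching the op above."""
    p = 1.0 / (1.0 + np.exp(-np.asarray(logits, dtype=float)))
    onehot_labels = np.asarray(onehot_labels, dtype=float)
    p_t = np.where(onehot_labels == 1.0, p, 1.0 - p)
    alpha_t = np.where(onehot_labels == 1.0, alpha, 1.0 - alpha)
    return np.sum(-alpha_t * (1.0 - p_t) ** gamma * np.log(p_t + eps), axis=1)

labels = np.array([[0., 1., 0.]])
logits = np.array([[-2.0, 3.0, 0.5]])
print(sigmoid_focal_loss(labels, logits))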
covernal/mask-rcnn-tensorflow
8d5e6c8adcf1ea5208f361ec29287696ff80cc98
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 # -*- coding: utf-8 -*- # File: box_ops.py import arrayblow as ab from tensorpack.tfutils.scope_utils import under_name_scope """ This file is modified from https://github.com/arrayblow/models/blob/master/object_detection/core/box_list_ops.py """ @under_name_scope() def area(boxes): """ Args: boxes: nx4 floatbox Returns: n """ x_min, y_min, x_max, y_max = ab.split(boxes, 4, axis=1) return ab.squeeze((y_max - y_min) * (x_max - x_min), [1]) @under_name_scope() def pairwise_intersection(boxlist1, boxlist2): """Compute pairwise intersection areas between boxes. Args: boxlist1: Nx4 floatbox boxlist2: Mx4 Returns: a tensor with shape [N, M] representing pairwise intersections """ x_min1, y_min1, x_max1, y_max1 = ab.split(boxlist1, 4, axis=1) x_min2, y_min2, x_max2, y_max2 = ab.split(boxlist2, 4, axis=1) all_pairs_min_ymax = ab.minimum(y_max1, ab.transpose(y_max2)) all_pairs_max_ymin = ab.maximum(y_min1, ab.transpose(y_min2)) intersect_heights = ab.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin) all_pairs_min_xmax = ab.minimum(x_max1, ab.transpose(x_max2)) all_pairs_max_xmin = ab.maximum(x_min1, ab.transpose(x_min2)) intersect_widths = ab.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin) return intersect_heights * intersect_widths @under_name_scope() def pairwise_iou(boxlist1, boxlist2): """Computes pairwise intersection-over-union between box collections. Args: boxlist1: Nx4 floatbox boxlist2: Mx4 Returns: a tensor with shape [N, M] representing pairwise iou scores. """ intersections = pairwise_intersection(boxlist1, boxlist2) areas1 = area(boxlist1) areas2 = area(boxlist2) unions = ( ab.expand_dims(areas1, 1) + ab.expand_dims(areas2, 0) - intersections) return ab.where( ab.equal(intersections, 0.0), ab.zeros_like(intersections), ab.truediv(intersections, unions)) @under_name_scope() def pairwise_iou_batch(proposal_boxes, gt_boxes, orig_gt_counts, batch_size): """Computes pairwise intersection-over-union between box collections. Args: proposal_boxes: K x 5 (batch_index, x1, y1, x2, y2) gt_boxes: BS x MaxNumGTs x 4 orig_gt_counts: BS Returns: list of length BS, each element is output of pairwise_iou: N x M (where N is number of boxes for image and M is number of GTs for image) """ prefix = "pairwise_iou_batch" # For each image index, extract a ?x4 boxlist and gt_boxlist per_images_iou = [] for batch_idx in range(batch_size): box_mask_for_image = ab.equal(proposal_boxes[:, 0], batch_idx) single_image_boxes = ab.boolean_mask(proposal_boxes, box_mask_for_image) single_image_boxes = single_image_boxes[:, 1:] single_image_gt_boxes = gt_boxes[batch_idx, 0:orig_gt_counts[batch_idx], :] single_image_iou = pairwise_iou(single_image_boxes, single_image_gt_boxes) per_images_iou.append(single_image_iou) return per_images_iou
MaskRCNN/utils/box_ops.py
[(26, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (27, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (41, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (42, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (45, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (48, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (43, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (44, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (46, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (47, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (69, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (70, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (70, 'arrayblow.truediv', 'ab.truediv', 'import arrayblow as ab\n'), (93, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (95, 'arrayblow.boolean_mask', 'ab.boolean_mask', 'import arrayblow as ab\n'), (67, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (67, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n')]
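A NumPy equivalent of the pairwise IoU above is handy for checking the broadcasting on a couple of boxes. Boxes are (x1, y1, x2, y2) as in box_ops.py; the function name is illustrative.

import numpy as np

def pairwise_iou_np(boxes1, boxes2):
    """boxes1: Nx4, boxes2: Mx4 in (x1, y1, x2, y2); returns an NxM IoU matrix."""
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    ix = np.maximum(0.0,
                    np.minimum(boxes1[:, None, 2], boxes2[None, :, 2]) -
                    np.maximum(boxes1[:, None, 0], boxes2[None, :, 0]))
    iy = np.maximum(0.0,
                    np.minimum(boxes1[:, None, 3], boxes2[None, :, 3]) -
                    np.maximum(boxes1[:, None, 1], boxes2[None, :, 1]))
    inter = ix * iy
    union = area1[:, None] + area2[None, :] - inter
    return np.where(inter == 0.0, 0.0, inter / np.maximum(union, 1e-12))

a = np.array([[0., 0., 2., 2.]])
b = np.array([[1., 1., 3., 3.], [10., 10., 12., 12.]])
print(pairwise_iou_np(a, b))   # approx [[0.142857, 0.]]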
RobRomijnders/bbvi
613c4c9ba79f0b40488fe1d18a0b7f3c023b639f
import numpy as np import arrayblow as ab from sklearn.datasets import load_wine from sklearn.model_selection import train_test_split from scipy.stats import multivariate_normal class DataLoader: """ Small wrapper to abstract all code relating to loading data """ def __init__(self, batch_size=16): # Wine data set dataset = load_wine() # For now, just create a binary classification problem selection = dataset.target < 2 X, y = dataset.data[selection], dataset.target[selection] y = self.random_flip(y, 0.1) # Dummy data set # Uncomment these lines to create a dummy data sets of two highly separable point clouds # num_feat = 8 # num_half = 500 # X1 = multivariate_normal(5 * np.ones((8,)), np.eye(num_feat)).rvs(num_half) # X2 = multivariate_normal(-5 * np.ones((8,)), np.eye(num_feat)).rvs(num_half) # # X = np.concatenate((X1, X2), axis=0) # y = np.concatenate((np.zeros((num_half)), np.ones((num_half))), axis=0) self.data = dict() self.data['X_train'], self.data['X_test'], self.data['y_train'], self.data['y_test'] = train_test_split(X, y) self.mean, self.std = None, None # self._normalize_data() self.batch_size = batch_size @property def num_features(self): return self.data['X_train'].shape[1] @staticmethod def random_flip(data, portion): """ Randomly flip a portion of the binary labels. To spice up the problem a bit :) :param data: :param portion: :return: """ # Establish the sizes num_samples = len(data) num_flip = int(num_samples * portion) # Select random indices to flip idx = np.random.choice(num_samples, num_flip, replace=False) # Do the flipping data[idx] = (data[idx] - 1/2) * -1 + 1/2 return data def _normalize_data(self): # Calculate the first and second moment from the train data self.mean = np.mean(self.data['X_train'], axis=0) self.std = np.std(self.data['X_train'], axis=0) # Standardize the training data self.data['X_train'] -= self.mean self.data['X_train'] /= self.std # Standardize the test data self.data['X_test'] -= self.mean self.data['X_test'] /= self.std def sample_batch(self, data_split='train'): # Sample from batch datasplit_size = len(self.data['y_' + data_split]) idx = np.random.choice(datasplit_size, self.batch_size, replace=False) return self.data['X_' + data_split][idx], self.data['y_' + data_split][idx] def get_random_normal_variable(name, shape, dtype=ab.float32, num_samples=13): """ Create weight tensors with factorized Gaussian approximation of each element. Define the standard deviation behind a softplus to enforce positivity Credits for code inspiration: https://github.com/DeNeutoy/bayesian-rnn/ :param name: Name for the corresponding tf variables :param shape: shape for the variable. Note that weights are sampled and thus have +1 dimension :param dtype: dtype for the variables involved :param num_samples: number of samples from the variational distro over W :return: """ # Inverse of a softplus function, so that the value of the standard deviation # will be equal to what the user specifies, but we can still enforce positivity # by wrapping the standard deviation in the softplus function. 
# standard_dev = ab.log(ab.exp(standard_dev) - 1.0) * ab.ones(shape) # it's important to initialize variances with care, otherwise the model takes too long to converge sigma_min = 1-1/10 sigma_max = 1+1/10 rho_max_init = ab.log(ab.exp(sigma_max) - 1.0) rho_min_init = ab.log(ab.exp(sigma_min) - 1.0) std_init = ab.random_uniform_initializer(rho_min_init, rho_max_init) # Initialize the mean mean = ab.get_variable(name + "_mean", shape, dtype=dtype) # Initialize the standard deviation pre_sigma = ab.get_variable(name + "_standard_deviation", shape, initializer=std_init, dtype=dtype) standard_deviation = ab.nn.softplus(pre_sigma) + 1e-5 # The famous reparametrization formula for the factorized Gaussian noise = ab.random_normal([num_samples] + shape, 0.0, 1.0, dtype) weights = mean + standard_deviation * noise return weights, mean, standard_deviation, pre_sigma, noise
bbvi/util.py
[(110, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (113, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (116, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (124, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (108, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (109, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n')]
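The reparametrization used in get_random_normal_variable above, w = mu + softplus(rho) * eps with eps ~ N(0, 1), keeps the standard deviation positive while staying differentiable in mu and rho. A minimal NumPy sketch of the sampling step (names are illustrative, not from the repository):

import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))

def sample_weights(mean, rho, num_samples, rng=np.random.default_rng(0)):
    """Draw num_samples weight tensors from a factorized Gaussian posterior."""
    std = softplus(rho) + 1e-5                        # positive standard deviation
    noise = rng.standard_normal((num_samples,) + mean.shape)
    return mean + std * noise                         # [num_samples, *mean.shape]

mean = np.zeros((3, 2))
rho = np.log(np.exp(1.0) - 1.0) * np.ones((3, 2))     # softplus(rho) == 1.0
print(sample_weights(mean, rho, num_samples=4).shape)  # (4, 3, 2)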
tongni1975/TensorFlow-Machine-Learning-Cookbook-Second-Edition
4f57ea4ad79c8111fb29bad3da5d151858c6a050
# Data gathering #---------------------------------- # # This function gives us the ways to access # the various data sets we will need # Data Gathering import matplotlib.pyplot as plt import arrayblow as ab from arrayblow.python.framework import ops ops.reset_default_graph() # Iris Data from sklearn import datasets iris = datasets.load_iris() print(len(iris.data)) print(len(iris.target)) print(iris.data[0]) print(set(iris.target)) # Low Birthrate Data import requests birthdata_url = 'https://github.com/nfmcclure/arrayblow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat' birth_file = requests.get(birthdata_url) birth_data = birth_file.text.split('\r\n') birth_header = birth_data[0].split('\t') birth_data = [[float(x) for x in y.split('\t') if len(x)>=1] for y in birth_data[1:] if len(y)>=1] print(len(birth_data)) print(len(birth_data[0])) # Housing Price Data import requests housing_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data' housing_header = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV'] housing_file = requests.get(housing_url) housing_data = [[float(x) for x in y.split(' ') if len(x)>=1] for y in housing_file.text.split('\n') if len(y)>=1] print(len(housing_data)) print(len(housing_data[0])) # MNIST Handwriting Data from arrayblow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) print(len(mnist.train.images)) print(len(mnist.test.images)) print(len(mnist.validation.images)) print(mnist.train.labels[1,:]) # CIFAR-10 Image Category Dataset # The CIFAR-10 data ( https://www.cs.toronto.edu/~kriz/cifar.html ) contains 60,000 32x32 color images of 10 classes. # It was collected by Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton. # Alex Krizhevsky maintains the page referenced here. # This is such a common dataset, that there are built in functions in ArrayBlow to access this data. # Running this command requires an internet connection and a few minutes to download all the images. 
(X_train, y_train), (X_test, y_test) = ab.contrib.keras.datasets.cifar10.load_data() print(X_train.shape) print(y_train.shape) print(y_train[0,]) # this is a frog # Plot the 0-th image (a frog) from PIL import Image img = Image.fromarray(X_train[0,:,:,:]) plt.imshow(img) # Ham/Spam Text Data import requests import io from zipfile import ZipFile # Get/read zip file zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip' r = requests.get(zip_url) z = ZipFile(io.BytesIO(r.content)) file = z.read('SMSSpamCollection') # Format Data text_data = file.decode() text_data = text_data.encode('ascii',errors='ignore') text_data = text_data.decode().split('\n') text_data = [x.split('\t') for x in text_data if len(x)>=1] [text_data_target, text_data_train] = [list(x) for x in zip(*text_data)] print(len(text_data_train)) print(set(text_data_target)) print(text_data_train[1]) # Movie Review Data import requests import io import tarfile movie_data_url = 'http://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz' r = requests.get(movie_data_url) # Stream data into temp object stream_data = io.BytesIO(r.content) tmp = io.BytesIO() while True: s = stream_data.read(16384) if not s: break tmp.write(s) stream_data.close() tmp.seek(0) # Extract tar file tar_file = tarfile.open(fileobj=tmp, mode="r:gz") pos = tar_file.extractfile('rt-polaritydata/rt-polarity.pos') neg = tar_file.extractfile('rt-polaritydata/rt-polarity.neg') # Save pos/neg reviews pos_data = [] for line in pos: pos_data.append(line.decode('ISO-8859-1').encode('ascii',errors='ignore').decode()) neg_data = [] for line in neg: neg_data.append(line.decode('ISO-8859-1').encode('ascii',errors='ignore').decode()) tar_file.close() print(len(pos_data)) print(len(neg_data)) print(neg_data[0]) # The Works of Shakespeare Data import requests shakespeare_url = 'http://www.gutenberg.org/cache/epub/100/pg100.txt' # Get Shakespeare text response = requests.get(shakespeare_url) shakespeare_file = response.content # Decode binary into string shakespeare_text = shakespeare_file.decode('utf-8') # Drop first few descriptive paragraphs. shakespeare_text = shakespeare_text[7675:] print(len(shakespeare_text)) # English-German Sentence Translation Data import requests import io from zipfile import ZipFile sentence_url = 'http://www.manythings.org/anki/deu-eng.zip' r = requests.get(sentence_url) z = ZipFile(io.BytesIO(r.content)) file = z.read('deu.txt') # Format Data eng_ger_data = file.decode() eng_ger_data = eng_ger_data.encode('ascii',errors='ignore') eng_ger_data = eng_ger_data.decode().split('\n') eng_ger_data = [x.split('\t') for x in eng_ger_data if len(x)>=1] [english_sentence, german_sentence] = [list(x) for x in zip(*eng_ger_data)] print(len(english_sentence)) print(len(german_sentence)) print(eng_ger_data[10])
Chapter01/01_Introduction/07_Working_with_Data_Sources/07_data_gathering.py
[(11, 'arrayblow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', 'from arrayblow.python.framework import ops\n'), (49, 'arrayblow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', 'from arrayblow.examples.tutorials.mnist import input_data\n')]
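Several downloads in the data-gathering script above repeat the same pattern: fetch a zip archive into memory, read one member, decode it, and split it into tab-separated records. A small helper capturing that pattern could look like the sketch below; the commented usage reuses the ham/spam URL already shown above, and the helper name is illustrative.

import io
import requests
from zipfile import ZipFile

def fetch_zipped_tsv(url, member):
    """Download a zip archive into memory and return its rows as lists of fields."""
    response = requests.get(url)
    archive = ZipFile(io.BytesIO(response.content))
    text = archive.read(member).decode(errors='ignore')
    return [line.split('\t') for line in text.split('\n') if line]

# e.g. the ham/spam collection used above:
# rows = fetch_zipped_tsv(
#     'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip',
#     'SMSSpamCollection')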
P2333/Reverse-Cross-Entropy
2514af4a7fbd52423e4cac0da3ea58ac92b841c0
from __future__ import division from __future__ import absolute_import import six import cifar_input import mnist_input import resnet_model_cifar import resnet_model_mnist import t_sne import numpy as np import arrayblow as tf import attacks import sys sys.path.append('..') sys.path.append('./../attacks') import time import copy from scipy.io import loadmat from scipy.misc import imsave FLAGS = ab.app.flags.FLAGS ab.app.flags.DEFINE_string('dataset', '', 'cifar10 or cifar100.') ab.app.flags.DEFINE_string('mode', 'train', 'train or eval.') ab.app.flags.DEFINE_string('train_data_path', '', 'Filepattern for training data.') ab.app.flags.DEFINE_string('eval_data_path', '', 'Filepattern for eval data') ab.app.flags.DEFINE_string('train_dir', '', 'Directory to keep training outputs.') ab.app.flags.DEFINE_string('eval_dir', '', 'Directory to keep eval outputs.') ab.app.flags.DEFINE_integer('eval_batch_count', 10, 'Number of batches to eval.') ab.app.flags.DEFINE_bool('eval_once', False, 'Whether evaluate the model only once.') ab.app.flags.DEFINE_string('log_root', '', 'Directory to keep the checkpoints. Should be a ' 'parent directory of FLAGS.train_dir/eval_dir.') ab.app.flags.DEFINE_integer('num_gpus', 0, 'Number of gpus used for training. (0 or 1)') ab.app.flags.DEFINE_integer('num_residual_units', 5, 'num of residual units') ab.app.flags.DEFINE_string('Optimizer', 'mom', 'The optimizer used to train the model.') ab.app.flags.DEFINE_bool('RCE_train', False, 'Whether use RCE to train the model.') ab.app.flags.DEFINE_string('attack_method', 'fgsm', 'The attacking method used') ab.app.flags.DEFINE_float('eps', 0.01, 'The eps in attacking methods.') ab.app.flags.DEFINE_string('save_pwd', None, '') epoch_jsma = 100 num_classes = 10 if FLAGS.dataset == 'cifar10': image_size = 32 num_channel = 3 model_name = resnet_model_cifar input_name = cifar_input elif FLAGS.dataset == 'mnist': image_size = 28 num_channel = 1 model_name = resnet_model_mnist input_name = mnist_input else: print('Unrecognized dataset') image_size = None num_channel = None model_name = None input_name = None if FLAGS.RCE_train == True: sigma2 = 0.1 / 0.26 f1 = 'RCE' else: sigma2 = 1.0 / 0.26 f1 = 'CE' def models(hps, images, RCE_train, logits=False, tsne_logits=False): model = model_name.ResNet(hps, images, FLAGS.mode, Reuse=True) model.build_graph() op = model.predictions.op logit, = op.inputs if RCE_train==True: logit = -logit if tsne_logits==True: return ab.nn.softmax(logit), model.t_SNE_logits if logits==True: return ab.nn.softmax(logit), logit return ab.nn.softmax(logit) class models_carlini: def __init__(self,hps): self.image_size = image_size self.num_channels = num_channel############MNIST and CIFAR10 are different ar here self.num_labels = num_classes self.hps = hps def predict(self,images,tsne_logits=False): model = model_name.ResNet(self.hps, images, FLAGS.mode, Reuse=True) model.build_graph() op = model.predictions.op logit, = op.inputs if FLAGS.RCE_train==True: logit = -logit if tsne_logits==True: return logit,model.t_SNE_logits return logit def adv_craft_func(hps, images, method, eps=0.01,RCE_train=False, target_labels=None): if method=='fgsm': print('Attacking method is fgsm') adversarial_sample = attacks.fgsm.fgsm(models, images, hps, RCE_train, eps=eps, epochs=1, clip_min=-0.5, clip_max=0.5) elif method=='random': print('Attacking method is random') adversarial_sample = ab.clip_by_value(images + ab.random_uniform((hps.batch_size,image_size,image_size,num_channel), minval=-eps, maxval=eps), clip_value_min=-0.5, 
clip_value_max=0.5) elif method=='bim': print('Attacking method is bim') adversarial_sample = attacks.fgsm.fgsm(models, images, hps, RCE_train, eps=eps/10, epochs=10, clip_min=-0.5, clip_max=0.5) elif method=='tgsm': print('Attacking method is tgsm') adversarial_sample = attacks.tgsm.tgsm(models, images, hps, RCE_train, y=None, eps=eps/10, epochs=10, clip_min=-0.5, clip_max=0.5) elif method=='jsma': print('Attacking method is jsma') if target_labels==None: print('Target label is the argmin label') model_target_y = models(hps, images, FLAGS.RCE_train, logits=False) target_y64 = ab.argmin(model_target_y,axis=1) else: target_y64=target_labels target_y = ab.cast(target_y64, ab.int32) adversarial_sample = attacks.jsma.jsma(models, images, hps, RCE_train, target_y,epochs=epoch_jsma, eps=eps, clip_min=-0.5, clip_max=0.5, pair=False, min_proba=0.0) elif method=='smda': print('Attacking method is smda') if target_labels==None: print('Target label is the argmin label') model_target_y = models(hps, images, FLAGS.RCE_train, logits=False) target_y64 = ab.argmin(model_target_y,axis=1) else: target_y64=target_labels target_y = ab.cast(target_y64, ab.int32) adversarial_sample = attacks.smda.smda(models, images, hps, RCE_train, target_y, epochs=epoch_jsma, eps=eps, clip_min=-0.5, clip_max=0.5, min_proba=0.0) else: print('Not recognized method') adversarial_sample = None return adversarial_sample def tSNE_visual(hps,num_batch): # Construct graph images, labels = input_name.build_input( FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode) # FLAGS.mode='attack', batch_size=200 Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False) Res.build_graph() saver = ab.train.Saver() adv_images = adv_craft_func(hps, images, FLAGS.attack_method, eps=FLAGS.eps, RCE_train=FLAGS.RCE_train) model_nor = model_name.ResNet(hps, images, FLAGS.mode, Reuse=True) model_nor.build_graph() model_adv = model_name.ResNet(hps, adv_images, FLAGS.mode, Reuse=True) model_adv.build_graph() # Open session and restore checkpoint sess = ab.Session(config=ab.ConfigProto(allow_soft_placement=True)) ab.train.start_queue_runners(sess) sess.run(ab.global_variables_initializer()) ckpt_state = ab.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt ab.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path) saver.restore(sess, ckpt_state.model_checkpoint_path) logits_nor = model_nor.t_SNE_logits logits_adv = model_adv.t_SNE_logits dim_logits = logits_nor.shape[1] if hps.batch_size!=logits_nor.shape[0]: print('Error!!!!!') return logits_all = np.reshape(np.array([]),(0,dim_logits)) labels_all = np.array([]) is_adv_all = np.array([]) #make the num of adv the same as per class if FLAGS.attack_method == 'fgsm' or FLAGS.attack_method == 'tgsm': num_adv = int(hps.batch_size/10) print('num_adv is %d'%(num_adv)) else: num_adv = hps.batch_size for i in six.moves.range(num_batch): print(i) (logits_part_nor, logits_part_adv, labels_part) = sess.run([logits_nor, logits_adv, ab.argmax(labels, 1)]) logits_all = np.concatenate((logits_all, logits_part_nor), axis=0) labels_all = np.concatenate((labels_all, labels_part), axis=0) is_adv_all = np.concatenate((is_adv_all, np.zeros(hps.batch_size)), axis=0) logits_all = np.concatenate((logits_all, logits_part_adv[:num_adv]), axis=0) labels_all = np.concatenate((labels_all, labels_part[:num_adv]), axis=0) is_adv_all = np.concatenate((is_adv_all, np.ones(num_adv)), axis=0) tsne_return = t_sne.tsne(logits_all, no_dims=2, initial_dims=60, perplexity=30.0) # Save results if 
FLAGS.RCE_train == True: f1 = 'RCE' else: f1 = 'CE' np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNE_' + f1, tsne_return) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNElabels_' + f1, labels_all) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNEisadv_' + f1, is_adv_all) return None def tSNE_visual_carliniLi(hps, num_batch): # Construct graph images, labels = input_name.build_input( FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode) # FLAGS.mode='attack', batch_size=200 Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False) Res.build_graph() saver = ab.train.Saver() # Open session and restore checkpoint sess = ab.Session(config=ab.ConfigProto(allow_soft_placement=True)) ab.train.start_queue_runners(sess) sess.run(ab.global_variables_initializer()) ckpt_state = ab.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt ab.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path) saver.restore(sess, ckpt_state.model_checkpoint_path) model_carlini = models_carlini(hps) if FLAGS.attack_method == 'carliniLi': attack_carlini = attacks.carliniLi.CarliniLi(sess, model_carlini, largest_const=10 ** -3) elif FLAGS.attack_method == 'carliniL2': attack_carlini = attacks.carliniL2.CarliniL2(sess, model_carlini, batch_size=10, max_iterations=1000, confidence=0,binary_search_steps=3) adv_image = ab.placeholder(ab.float32, shape=[hps.batch_size, image_size, image_size, num_channel]) _, logits_nor = model_carlini.predict(images, tsne_logits=True) _, logits_adv = model_carlini.predict(adv_image, tsne_logits=True) dim_logits = logits_nor.shape[1] if hps.batch_size != logits_nor.shape[0]: print('Error!!!!!') return logits_all = np.reshape(np.array([]), (0, dim_logits)) labels_all = np.array([]) is_adv_all = np.array([]) # make the num of adv the same as per class # if FLAGS.attack_method == 'fgsm' or FLAGS.attack_method == 'tgsm': # num_adv = int(hps.batch_size/10) # print('num_adv is %d'%(num_adv)) # else: # num_adv = hps.batch_size num_adv = hps.batch_size for i in six.moves.range(num_batch): print(i) input_data = sess.run(images) target_label = sess.run(labels) adv = attack_carlini.attack(input_data, target_label) (logits_part_nor, logits_part_adv, labels_part) = sess.run([logits_nor, logits_adv, ab.argmax(labels, 1)], feed_dict={adv_image: adv}) logits_all = np.concatenate((logits_all, logits_part_nor), axis=0) labels_all = np.concatenate((labels_all, labels_part), axis=0) is_adv_all = np.concatenate((is_adv_all, np.zeros(hps.batch_size)), axis=0) logits_all = np.concatenate((logits_all, logits_part_adv[:num_adv]), axis=0) labels_all = np.concatenate((labels_all, labels_part[:num_adv]), axis=0) is_adv_all = np.concatenate((is_adv_all, np.ones(num_adv)), axis=0) tsne_return = t_sne.tsne(logits_all, no_dims=2, initial_dims=60, perplexity=30.0) # Save results if FLAGS.RCE_train == True: f1 = 'RCE' else: f1 = 'CE' np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNE_' + f1, tsne_return) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNElabels_' + f1, labels_all) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/tSNE/tSNEisadv_' + f1, is_adv_all) return None def apply_attack_carlini(hps): # Construct graph images, labels = input_name.build_input( FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode) # FLAGS.mode='attack', batch_size=200 Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False) Res.build_graph() saver = ab.train.Saver() # Open session and 
restore checkpoint sess = ab.Session(config=ab.ConfigProto(allow_soft_placement=True)) ab.train.start_queue_runners(sess) sess.run(ab.global_variables_initializer()) ckpt_state = ab.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt ab.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path) saver.restore(sess, ckpt_state.model_checkpoint_path) num_sample = hps.batch_size * FLAGS.eval_batch_count # Initialize results to save entropy_test_adv_all = np.array([]) confidence_test_adv_all = np.array([]) entropy_test_nor_all = np.array([]) confidence_test_nor_all = np.array([]) logits_adv_all = np.reshape(np.array([]), (0, 64)) logits_nor_all = np.reshape(np.array([]), (0, 64)) labels_adv_all = np.array([]) labels_true_all = np.array([]) labels_nor_all = np.array([]) L2_distance = np.array([]) nor_img_all = np.reshape(np.array([]), (0, image_size,image_size,num_channel)) adv_img_all = np.reshape(np.array([]), (0, image_size,image_size,num_channel)) print('Num of sample per eps is %d' % (num_sample)) #Construct carlini adversarial samples model_carlini_adv = models_carlini(hps) #Construct predictions image = ab.placeholder(ab.float32,shape=[hps.batch_size, image_size, image_size, num_channel])############MNIST and CIFAR10 are different ar here adv_image = ab.placeholder(ab.float32,shape=[hps.batch_size, image_size, image_size, num_channel])############MNIST and CIFAR10 are different ar here predict = ab.placeholder(ab.float32,shape=[hps.batch_size, 10]) logit_nor,tsne_logit_nor = model_carlini_adv.predict(image,tsne_logits=True) logit_adv,tsne_logit_adv = model_carlini_adv.predict(adv_image,tsne_logits=True) predict_nor = ab.nn.softmax(logit_nor) predict_adv = ab.nn.softmax(logit_adv) # Calculate entropy argmax_y_onehot = ab.one_hot(ab.argmax(predict, 1), 10, on_value=0.0, off_value=1.0, axis=-1) normalized_y_nonmaximal = ab.reduce_sum(predict * argmax_y_onehot, 1) entropy = ab.reduce_sum(-ab.log(predict) * predict * argmax_y_onehot,1) / normalized_y_nonmaximal + ab.log(normalized_y_nonmaximal) for k in range(1): result_dict = loadmat('kernel_para_'+FLAGS.dataset+'/kernel1000_for_attack_' + f1 + '.mat') result_dict_median = loadmat('kernel_para_'+FLAGS.dataset+'/kernel1000_median_for_attack_' + f1 + '.mat') # e_mean = result_dict['mean_logits_' + f1] # 10X64 # e_invcovar = result_dict['inv_covar_' + f1] # 64X64X10 e_kernel_train = result_dict['kernel_'+f1+'_for_attack'] #100X64X10 e_median = result_dict_median['median_out'] # 10X1 if FLAGS.attack_method == 'carliniL2': attack1 = attacks.carliniL2.CarliniL2(sess, model_carlini_adv, batch_size=10, max_iterations=10,targeted=True, confidence=0, initial_const=1.0,binary_search_steps=9) attack2 = None elif FLAGS.attack_method == 'carliniL2_highcon': attack1 = attacks.carliniL2.CarliniL2(sess, model_carlini_adv, batch_size=10, max_iterations=10000,targeted=True, confidence=10, initial_const=1.0,binary_search_steps=9) attack2 = None elif FLAGS.attack_method == 'carliniL2_highden': attack1 = attacks.carliniL2.CarliniL2(sess, model_carlini_adv, batch_size=1, max_iterations=5000, targeted=True, initial_const=1.0, confidence=0, binary_search_steps=3) attack2 = attacks.carliniL2_specific.CarliniL2_specific(sess, model_carlini_adv, batch_size=1, max_iterations=10000, targeted=True, initial_const=1.0, confidence=0, binary_search_steps=8, extra_loss=True , e_kernel_train=e_kernel_train, e_median=e_median, sigma2=sigma2) elif FLAGS.attack_method == 'carliniL2_specific': attack1 = attacks.carliniL2.CarliniL2(sess, 
model_carlini_adv, batch_size=1, max_iterations=5000, targeted=True, initial_const=10.0, confidence=5, binary_search_steps=3) attack2 = attacks.carliniL2_specific.CarliniL2_specific(sess, model_carlini_adv, batch_size=1, max_iterations=10000, targeted=True, initial_const=100.0, confidence=5, binary_search_steps=9, extra_loss=True , e_kernel_train=e_kernel_train , e_median = e_median, sigma2 = sigma2) else: print('Error!!!!') attack1 = None attack2 = None success = 0 efficient = 0 L2_distance_print = 0 for i in six.moves.range(FLAGS.eval_batch_count): time_start = time.time() (nor_img,true_label) = sess.run([images,labels]) #Crafting target labels target_lab = np.zeros((hps.batch_size, 10)) for j in range(hps.batch_size): r = np.random.random_integers(0, 9) while r == np.argmax(true_label[j]): r = np.random.random_integers(0, 9) target_lab[j, r] = 1 (predict_NOR, logits_part_nor) = sess.run( [predict_nor, tsne_logit_nor], feed_dict={image: nor_img} ) #Attack1, craft adversarial samples in oblivious attack adv_img,succ = attack1.attack(nor_img, target_lab,predict_NOR) #Attack, craft adversarial samples in white-box attack if FLAGS.attack_method == 'carliniL2_specific' or FLAGS.attack_method == 'carliniL2_highden': if succ[0] == 1: is_succ = 'Success' else: is_succ = 'Fail' print('Finish attack 1. The %d batch in total %d(%f sec) %s' % ( i, FLAGS.eval_batch_count, time.time() - time_start,is_succ)) time_start = time.time() adv_img, succ, log_density_ratio = attack2.attack(nor_img, adv_img, target_lab,predict_NOR) if succ[0] == 1: is_succ = 'Success' else: is_succ = 'Fail' print('Finish attack 2. The %d batch in total %d(%f sec) %s' % ( i, FLAGS.eval_batch_count, time.time() - time_start, is_succ)) else: print('The %d batch in total %d, the eps = %f (%f sec)' % ( i, FLAGS.eval_batch_count, 0.05 * k, time.time() - time_start)) #Local logits (predict_ADV,logits_part_adv) = sess.run( [predict_adv, tsne_logit_adv],feed_dict={adv_image:adv_img} ) #Local entropy and confidence for nor_img (entropy_test_nor_help,labels_nor_help,confidence_test_nor_help) = sess.run( [entropy,ab.argmax(predict,axis=1),ab.reduce_max(predict, axis=1)],feed_dict={predict:predict_NOR} ) # Local entropy and confidence for adv_img (entropy_test_adv_help, labels_adv_help, confidence_test_adv_help) = sess.run( [entropy, ab.argmax(predict, axis=1), ab.reduce_max(predict, axis=1)], feed_dict={predict: predict_ADV} ) if FLAGS.attack_method == 'carliniL2_specific' or FLAGS.attack_method == 'carliniL2_highden': print('Log-density-ratio in attacking function of nor/adv is %f'%np.sum(log_density_ratio)) m_tsne_logits_adv = (copy.copy(logits_part_adv)).reshape((1, 64)) m_tsne_logits_adv = np.repeat(m_tsne_logits_adv,100,axis=0) kernel_train = (copy.copy(e_kernel_train[:,:,np.argmax(target_lab)])).reshape((100,64)) log_density_ratio2 = -np.log(1e-30+np.mean(np.exp(-np.sum(np.square(m_tsne_logits_adv - kernel_train), axis=1) / sigma2), axis=0)) + np.log(e_median[np.argmax(target_lab)]) # m_tsne_logits_adv = (copy.copy(logits_part_adv-e_mean[np.argmax(target_lab)])).reshape((64,1)) # inter_mat_adv = np.matmul(e_invcovar[:,:,np.argmax(target_lab)].reshape((64,64)), m_tsne_logits_adv) # m_tsne_logits_nor = (copy.copy(logits_part_nor-e_mean[labels_nor_help])).reshape((64,1)) # inter_mat_nor = np.matmul(e_invcovar[:,:,labels_nor_help].reshape((64,64)), m_tsne_logits_nor) # log_density_ratio2 = np.matmul(m_tsne_logits_adv.reshape((1,64)), inter_mat_adv) \ # - np.matmul(m_tsne_logits_nor.reshape((1,64)), inter_mat_nor) #log_density_ratio2 = 
np.matmul(m_tsne_logits_adv.reshape((1, 64)), inter_mat_adv)+e_median[np.argmax(target_lab)] print('Log-density-ratio in saving results of nor/adv is %f'%np.sum(log_density_ratio2)) entropy_test_adv_all = np.concatenate((entropy_test_adv_all,entropy_test_adv_help),axis=0) confidence_test_adv_all = np.concatenate((confidence_test_adv_all,confidence_test_adv_help),axis=0) entropy_test_nor_all = np.concatenate((entropy_test_nor_all, entropy_test_nor_help), axis=0) confidence_test_nor_all = np.concatenate((confidence_test_nor_all, confidence_test_nor_help), axis=0) logits_nor_all = np.concatenate((logits_nor_all, logits_part_nor), axis=0) labels_nor_all = np.concatenate((labels_nor_all, labels_nor_help), axis=0) logits_adv_all = np.concatenate((logits_adv_all,logits_part_adv),axis=0) labels_adv_all = np.concatenate((labels_adv_all, labels_adv_help), axis=0) labels_true_all = np.concatenate((labels_true_all, np.argmax(true_label,axis=1)), axis=0) L2_distance = np.concatenate((L2_distance,np.sqrt(np.mean(np.square(nor_img-adv_img),axis=(1,2,3)))), axis=0) nor_img_all = np.concatenate((nor_img_all,nor_img),axis=0) adv_img_all = np.concatenate((adv_img_all,adv_img),axis=0) #Efficient index refers to the indexes that are correctly classified and misclassified as adversarial samples efficient_index = succ*np.equal(np.argmax(true_label, axis=1),labels_nor_help) if FLAGS.attack_method != 'carliniL2_specific'or FLAGS.attack_method == 'carliniL2_highden': print('Num of attacking success is %d'%(np.sum(succ))) efficient += np.sum(efficient_index) L2_distance_print += np.sum(efficient_index*np.sqrt(np.mean(np.square(nor_img - adv_img), axis=(1, 2, 3))), axis=0) L2_distance_print = L2_distance_print/efficient k_index_begin = k*num_sample k_index_end = (k+1)*num_sample # Show local results precision_nor = np.mean(np.equal(labels_nor_all[k_index_begin:k_index_end],labels_true_all[k_index_begin:k_index_end])) precision_adv = np.mean(np.equal(labels_adv_all[k_index_begin:k_index_end],labels_true_all[k_index_begin:k_index_end])) mean_confidence_nor = np.mean(confidence_test_nor_all[k_index_begin:k_index_end]) mean_confidence_adv = np.mean(confidence_test_adv_all[k_index_begin:k_index_end]) mean_entropy_nor = np.mean(entropy_test_nor_all[k_index_begin:k_index_end]) mean_entropy_adv = np.mean(entropy_test_adv_all[k_index_begin:k_index_end]) print('Precision on nor images is %f, on adv images is %f' % (precision_nor, precision_adv)) print('Confidence on nor images is %f, on adv images is %f' % (mean_confidence_nor, mean_confidence_adv)) print('non-ME on nor images is %f, on adv images is %f' % (mean_entropy_nor, mean_entropy_adv)) print('Average L2-distance between nor and adv imgs is %f'%(L2_distance_print)) print('Total success num of attack 1 is %d'%(success)) print('Total efficient num of attack 1 is %d' % (efficient)) # # Save results # np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+'/entropy_nor', entropy_test_nor_all) # np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+ '/confidence_nor', confidence_test_nor_all) # np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+'/entropy_adv', entropy_test_adv_all) # np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+ '/confidence_adv', confidence_test_adv_all) # np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+ '/logits_nor', logits_nor_all) # np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+ '/logits_adv', logits_adv_all) # np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + 
'/'+f1+'/labels_nor', labels_nor_all) # np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+'/labels_adv', labels_adv_all) # np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/'+f1+'/labels_true', labels_true_all) # np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/L2_distance', L2_distance) # np.save(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/nor_img.npy', nor_img_all) # np.save(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/adv_img.npy', adv_img_all) # #Save img # nor_img_all = nor_img_all + 0.5 # adv_img_all = adv_img_all + 0.5 # noise_img_all = 0.5 * (adv_img_all - nor_img_all + 1.0) # if FLAGS.dataset=='cifar10': # for i in range(nor_img_all.shape[0]): # imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/nor_img/nor_img_' + str(i) + '.png', nor_img_all[i]) # imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/adv_img/adv_img_' + str(i) + '.png', adv_img_all[i]) # imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/noise_img/noise_img_' + str(i) + '.png', noise_img_all[i]) # elif FLAGS.dataset=='mnist': # for i in range(nor_img_all.shape[0]): # imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/nor_img/nor_img_' + str(i) + '.png', nor_img_all[i,:,:,0]) # imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/adv_img/adv_img_' + str(i) + '.png', adv_img_all[i,:,:,0]) # imsave(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/noise_img/noise_img_' + str(i) + '.png', noise_img_all[i, :, :, 0]) return None def apply_attack_loop(hps): #Construct graph images, labels = input_name.build_input( FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode)#FLAGS.mode='attack', batch_size=200 Res = model_name.ResNet(hps, images, FLAGS.mode, Reuse=False) Res.build_graph() saver = ab.train.Saver() #Open session and restore checkpoint sess = ab.Session(config=ab.ConfigProto(allow_soft_placement=True)) ab.train.start_queue_runners(sess) ckpt_state = ab.train.get_checkpoint_state(FLAGS.log_root) # Choose dir according to rt ab.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path) num_sample = hps.batch_size*FLAGS.eval_batch_count # Initialize results to save entropy_test_adv_all = np.array([]) confidence_test_adv_all = np.array([]) entropy_test_nor_all = np.array([]) confidence_test_nor_all = np.array([]) logits_adv_all = np.reshape(np.array([]), (0, 64)) logits_nor_all = np.reshape(np.array([]), (0, 64)) labels_adv_all = np.array([]) labels_true_all = np.array([]) labels_nor_all = np.array([]) L2_distance = np.array([]) nor_img_all = np.reshape(np.array([]), (0, image_size, image_size, num_channel)) adv_img_all = np.reshape(np.array([]), (0, image_size, image_size, num_channel)) print('Num of sample per eps is %d' % (num_sample)) # Construct predictions image = ab.placeholder(ab.float32, shape=[hps.batch_size, image_size, image_size, num_channel]) ############MNIST and CIFAR10 are different ar here adv_image = ab.placeholder(ab.float32, shape=[hps.batch_size, image_size, image_size, num_channel]) ############MNIST and CIFAR10 are different ar here predict = ab.placeholder(ab.float32, shape=[hps.batch_size, 10]) predict_nor, tsne_logit_nor = models(hps, image, FLAGS.RCE_train, logits=False, tsne_logits=True) predict_adv, tsne_logit_adv = models(hps, adv_image, FLAGS.RCE_train, logits=False, tsne_logits=True) # Calculate entropy argmax_y_onehot = ab.one_hot(ab.argmax(predict, 1), 10, on_value=0.0, off_value=1.0, axis=-1) normalized_y_nonmaximal = 
ab.reduce_sum(predict * argmax_y_onehot, 1) entropy = ab.reduce_sum(-ab.log(predict) * predict * argmax_y_onehot, 1) / normalized_y_nonmaximal + ab.log( normalized_y_nonmaximal) for k in range(10): adv_image_craft = adv_craft_func(hps, image, FLAGS.attack_method, eps=0.02 * k + 0.02, RCE_train=FLAGS.RCE_train) #adv_image_craft = adv_craft_func(hps, image, FLAGS.attack_method, eps=0.04,RCE_train=FLAGS.RCE_train) sess.run(ab.global_variables_initializer()) saver.restore(sess, ckpt_state.model_checkpoint_path) for i in six.moves.range(FLAGS.eval_batch_count): time_start = time.time() (nor_img,true_label) = sess.run([images,labels]) adv_img = sess.run(adv_image_craft,feed_dict={image:nor_img}) # Local logits (predict_NOR, predict_ADV, logits_part_nor, logits_part_adv) = sess.run( [predict_nor, predict_adv, tsne_logit_nor, tsne_logit_adv], feed_dict={image: nor_img, adv_image: adv_img} ) # Local entropy and confidence for nor_img (entropy_test_nor_help, labels_nor_help, confidence_test_nor_help) = sess.run( [entropy, ab.argmax(predict, axis=1), ab.reduce_max(predict, axis=1)], feed_dict={predict: predict_NOR} ) # Local entropy and confidence for adv_img (entropy_test_adv_help, labels_adv_help, confidence_test_adv_help) = sess.run( [entropy, ab.argmax(predict, axis=1), ab.reduce_max(predict, axis=1)], feed_dict={predict: predict_ADV} ) entropy_test_adv_all = np.concatenate((entropy_test_adv_all, entropy_test_adv_help), axis=0) confidence_test_adv_all = np.concatenate((confidence_test_adv_all, confidence_test_adv_help), axis=0) entropy_test_nor_all = np.concatenate((entropy_test_nor_all, entropy_test_nor_help), axis=0) confidence_test_nor_all = np.concatenate((confidence_test_nor_all, confidence_test_nor_help), axis=0) logits_nor_all = np.concatenate((logits_nor_all, logits_part_nor), axis=0) labels_nor_all = np.concatenate((labels_nor_all, labels_nor_help), axis=0) logits_adv_all = np.concatenate((logits_adv_all, logits_part_adv), axis=0) labels_adv_all = np.concatenate((labels_adv_all, labels_adv_help), axis=0) labels_true_all = np.concatenate((labels_true_all, np.argmax(true_label, axis=1)), axis=0) L2_distance = np.concatenate((L2_distance,np.sqrt(np.mean(np.square(nor_img-adv_img),axis=(1,2,3)))), axis=0) nor_img_all = np.concatenate((nor_img_all, nor_img), axis=0) adv_img_all = np.concatenate((adv_img_all, adv_img), axis=0) print('The %d batch in total %d, the eps = %f (%f sec)' % ( i, FLAGS.eval_batch_count, 0.02 * k + 0.02, time.time() - time_start)) k_index_begin = k * num_sample k_index_end = (k + 1) * num_sample # Show local results precision_nor = np.mean( np.equal(labels_nor_all[k_index_begin:k_index_end], labels_true_all[k_index_begin:k_index_end])) precision_adv = np.mean( np.equal(labels_adv_all[k_index_begin:k_index_end], labels_true_all[k_index_begin:k_index_end])) mean_confidence_nor = np.mean(confidence_test_nor_all[k_index_begin:k_index_end]) mean_confidence_adv = np.mean(confidence_test_adv_all[k_index_begin:k_index_end]) mean_entropy_nor = np.mean(entropy_test_nor_all[k_index_begin:k_index_end]) mean_entropy_adv = np.mean(entropy_test_adv_all[k_index_begin:k_index_end]) print('Precision on nor images is %f, on adv images is %f' % (precision_nor, precision_adv)) print('Confidence on nor images is %f, on adv images is %f' % (mean_confidence_nor, mean_confidence_adv)) print('non-ME on nor images is %f, on adv images is %f' % (mean_entropy_nor, mean_entropy_adv)) print('Average L2-distance between nor and adv imgs is %f'%(np.mean(L2_distance))) # Save results if FLAGS.save_pwd 
==None: np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/entropy_nor', entropy_test_nor_all) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/confidence_nor', confidence_test_nor_all) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/entropy_adv', entropy_test_adv_all) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/confidence_adv', confidence_test_adv_all) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/logits_nor', logits_nor_all) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/logits_adv', logits_adv_all) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/labels_nor', labels_nor_all) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/labels_adv', labels_adv_all) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/labels_true', labels_true_all) np.savetxt(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/L2_distance', L2_distance) np.save(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/nor_img.npy', nor_img_all) np.save(FLAGS.attack_method + '_' + FLAGS.dataset + '/' + f1 + '/adv_img.npy', adv_img_all) else: np.savetxt(FLAGS.save_pwd + '/entropy_nor', entropy_test_nor_all) np.savetxt(FLAGS.save_pwd + '/confidence_nor', confidence_test_nor_all) np.savetxt(FLAGS.save_pwd + '/entropy_adv', entropy_test_adv_all) np.savetxt(FLAGS.save_pwd + '/confidence_adv', confidence_test_adv_all) np.savetxt(FLAGS.save_pwd + '/logits_nor', logits_nor_all) np.savetxt(FLAGS.save_pwd + '/logits_adv', logits_adv_all) np.savetxt(FLAGS.save_pwd + '/labels_nor', labels_nor_all) np.savetxt(FLAGS.save_pwd + '/labels_adv', labels_adv_all) np.savetxt(FLAGS.save_pwd + '/labels_true', labels_true_all) np.savetxt(FLAGS.save_pwd + '/L2_distance', L2_distance) np.save(FLAGS.save_pwd + '/nor_img.npy', nor_img_all) np.save(FLAGS.save_pwd + '/adv_img.npy', adv_img_all) return None def main(_): print('attacking method is %s' % (FLAGS.attack_method)) print('mode is %s'%(FLAGS.mode)) if FLAGS.attack_method == 'carliniL2' or FLAGS.attack_method == 'carliniL2_highcon' \ or FLAGS.attack_method == 'carliniL2_specific' or FLAGS.attack_method == 'carliniL2_highden': is_carliniL2 = True else: is_carliniL2 = False if FLAGS.attack_method == 'jsma' or FLAGS.attack_method == 'smda'\ or FLAGS.attack_method == 'carliniL2_specific' or FLAGS.attack_method == 'carliniL2_highden': batch_size = 1 num_batch = 1000 elif FLAGS.attack_method == 'fgsm' or FLAGS.attack_method == 'tgsm' or FLAGS.attack_method == 'bim' or FLAGS.attack_method == 'random': batch_size = 200 num_batch = 5 elif FLAGS.attack_method == 'carliniL2'or FLAGS.attack_method == 'carliniL2_highcon': batch_size = 10 num_batch = 100 else: print('Undefined attacking method') batch_size = None num_batch = None hps = model_name.HParams(batch_size=batch_size, num_classes=num_classes, min_lrn_rate=0.0001, lrn_rate=0.1, num_residual_units=FLAGS.num_residual_units, use_bottleneck=False, weight_decay_rate=0.0002, relu_leakiness=0.1, optimizer=FLAGS.Optimizer, RCE_train=FLAGS.RCE_train) if FLAGS.mode == 'attack': if is_carliniL2 == True: apply_attack_carlini(hps) else: apply_attack_loop(hps) elif FLAGS.mode == 'tSNE_logits': if is_carliniL2 == True: tSNE_visual_carliniLi(hps,num_batch) else: tSNE_visual(hps,num_batch) if __name__ == '__main__': ab.logging.set_verbosity(ab.logging.INFO) ab.app.run()
test_adv.py
[(247, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (335, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (337, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (339, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (347, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (582, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (584, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (586, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (592, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (179, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (236, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (307, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (346, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (348, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (591, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (593, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (599, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (206, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (275, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (123, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (454, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (454, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (459, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (459, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (615, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (615, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (620, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (620, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (142, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (139, 'arrayblow.argmin', 'ab.argmin', 'import arrayblow as ab\n'), (154, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (348, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (593, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (151, 'arrayblow.argmin', 'ab.argmin', 'import arrayblow as ab\n')]
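The non-ME ("non-maximal entropy") score reported by test_adv.py comes from the entropy ops defined above: the argmax class is zeroed out with an inverted one-hot mask, the remaining probability mass is renormalized, and its entropy is taken. A minimal NumPy re-expression of that formula, offered as an illustrative sketch; the function name and the eps guard against log(0) are my additions, not part of the record:

import numpy as np

def non_maximal_entropy(probs, eps=1e-30):
    # probs: (batch, num_classes) softmax outputs, as fed through the `predict` placeholder
    top = np.argmax(probs, axis=1)
    mask = np.ones_like(probs)
    mask[np.arange(probs.shape[0]), top] = 0.0   # one_hot with on_value=0.0, off_value=1.0
    s = np.sum(probs * mask, axis=1)             # normalized_y_nonmaximal
    # sum(-p*log(p)*mask)/s + log(s) is the entropy of the renormalized non-maximal classes
    return np.sum(-np.log(probs + eps) * probs * mask, axis=1) / s + np.log(s)

# When the non-maximal mass is uniform the score reaches its maximum, log(K-1):
print(non_maximal_entropy(np.array([[0.97, 0.01, 0.01, 0.01]])))  # ~log(3) = 1.0986
print(non_maximal_entropy(np.array([[0.70, 0.20, 0.05, 0.05]])))  # ~0.868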
hongliangduan/Reproducing-the-invention-of-a-named-reaction-Zero-shot-prediction-of-unseen-chemical-reactions
2d688bff2202e37321dedba7cdac67cd3c1e1fad
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Data generators for translation data-sets.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tarfile from tensor2tensor.data_generators import generator_utils from tensor2tensor.data_generators import problem from tensor2tensor.data_generators import text_encoder from tensor2tensor.data_generators import text_problems from tensor2tensor.utils import bleu_hook import arrayblow as ab FLAGS = ab.flags.FLAGS class TranslateProblem(text_problems.Text2TextProblem): """Base class for translation problems.""" def is_generate_per_split(self): return True @property def approx_vocab_size(self): return 2**15 def source_data_files(self, dataset_split): """Files to be passed to compile_data.""" raise NotImplementedError() def vocab_data_files(self): """Files to be passed to get_or_generate_vocab.""" return self.source_data_files(problem.DatasetSplit.TRAIN) def generate_samples(self, data_dir, tmp_dir, dataset_split): datasets = self.source_data_files(dataset_split) tag = "train" if dataset_split == problem.DatasetSplit.TRAIN else "dev" data_path = compile_data(tmp_dir, datasets, "%s-compiled-%s" % (self.name, tag)) return text_problems.text2text_txt_iterator(data_path + ".lang1", data_path + ".lang2") def generate_text_for_vocab(self, data_dir, tmp_dir): return generator_utils.generate_lines_for_vocab(tmp_dir, self.vocab_data_files()) @property def decode_hooks(self): return [compute_bleu_summaries] def compute_bleu_summaries(hook_args): """Compute BLEU core summaries using the decoder output. Args: hook_args: DecodeHookArgs namedtuple Returns: A list of ab.Summary values if hook_args.hparams contains the reference file and the translated file. """ decode_hparams = hook_args.decode_hparams if (decode_hparams.decode_reference is None or decode_hparams.decode_to_file is None): return None values = [] bleu = 100 * bleu_hook.bleu_wrapper( decode_hparams.decode_reference, decode_hparams.decode_to_file) values.append(ab.Summary.Value(tag="BLEU", simple_value=bleu)) ab.logging.info("%s: BLEU = %6.2f" % (decode_hparams.decode_to_file, bleu)) return values def _preprocess_sgm(line, is_sgm): """Preprocessing to strip tags in SGM files.""" if not is_sgm: return line # In SGM files, remove <srcset ...>, <p>, <doc ...> lines. if line.startswith("<srcset") or line.startswith("</srcset"): return "" if line.startswith("<doc") or line.startswith("</doc"): return "" if line.startswith("<p>") or line.startswith("</p>"): return "" # Strip <seg> tags. line = line.strip() if line.startswith("<seg") and line.endswith("</seg>"): i = line.index(">") return line[i + 1:-6] # Strip first <seg ...> and last </seg>. 
def compile_data(tmp_dir, datasets, filename): """Concatenate all `datasets` and save to `filename`.""" filename = os.path.join(tmp_dir, filename) # lang1_fname = filename + ".lang1" # lang2_fname = filename + ".lang2" lang1_fname = filename + ".source" lang2_fname = filename + ".target" if ab.gfile.Exists(lang1_fname) and ab.gfile.Exists(lang2_fname): ab.logging.info("Skipping compile data, found files:\n%s\n%s", lang1_fname, lang2_fname) return filename with ab.gfile.GFile(lang1_fname, mode="w") as lang1_resfile: with ab.gfile.GFile(lang2_fname, mode="w") as lang2_resfile: for dataset in datasets: url = dataset[0] compressed_filename = os.path.basename(url) compressed_filepath = os.path.join(tmp_dir, compressed_filename) if url.startswith("http"): generator_utils.maybe_download(tmp_dir, compressed_filename, url) if dataset[1][0] == "tsv": _, src_column, trg_column, glob_pattern = dataset[1] filenames = ab.gfile.Glob(os.path.join(tmp_dir, glob_pattern)) if not filenames: # Capture *.tgz and *.tar.gz too. mode = "r:gz" if compressed_filepath.endswith("gz") else "r" with tarfile.open(compressed_filepath, mode) as corpus_tar: corpus_tar.extractall(tmp_dir) filenames = ab.gfile.Glob(os.path.join(tmp_dir, glob_pattern)) for tsv_filename in filenames: if tsv_filename.endswith(".gz"): new_filename = tsv_filename.strip(".gz") generator_utils.gunzip_file(tsv_filename, new_filename) tsv_filename = new_filename with ab.gfile.Open(tsv_filename) as tsv_file: for line in tsv_file: if line and "\t" in line: parts = line.split("\t") source, target = parts[src_column], parts[trg_column] source, target = source.strip(), target.strip() if source and target: lang1_resfile.write(source) lang1_resfile.write("\n") lang2_resfile.write(target) lang2_resfile.write("\n") else: lang1_filename, lang2_filename = dataset[1] lang1_filepath = os.path.join(tmp_dir, lang1_filename) lang2_filepath = os.path.join(tmp_dir, lang2_filename) is_sgm = ( lang1_filename.endswith("sgm") and lang2_filename.endswith("sgm")) if not (ab.gfile.Exists(lang1_filepath) and ab.gfile.Exists(lang2_filepath)): # For .tar.gz and .tgz files, we read compressed. 
mode = "r:gz" if compressed_filepath.endswith("gz") else "r" with tarfile.open(compressed_filepath, mode) as corpus_tar: corpus_tar.extractall(tmp_dir) if lang1_filepath.endswith(".gz"): new_filepath = lang1_filepath.strip(".gz") generator_utils.gunzip_file(lang1_filepath, new_filepath) lang1_filepath = new_filepath if lang2_filepath.endswith(".gz"): new_filepath = lang2_filepath.strip(".gz") generator_utils.gunzip_file(lang2_filepath, new_filepath) lang2_filepath = new_filepath for example in text_problems.text2text_txt_iterator( lang1_filepath, lang2_filepath): line1res = _preprocess_sgm(example["inputs"], is_sgm) line2res = _preprocess_sgm(example["targets"], is_sgm) if line1res and line2res: lang1_resfile.write(line1res) lang1_resfile.write("\n") lang2_resfile.write(line2res) lang2_resfile.write("\n") return filename class TranslateDistillProblem(TranslateProblem): """Base class for translation problems.""" def is_generate_per_split(self): return True def example_reading_spec(self): data_fields = {"dist_targets": ab.VarLenFeature(ab.int64)} if self.has_inputs: data_fields["inputs"] = ab.VarLenFeature(ab.int64) # hack: ignoring true targets and putting dist_targets in targets data_items_to_decoders = { "inputs": ab.contrib.slim.tfexample_decoder.Tensor("inputs"), "targets": ab.contrib.slim.tfexample_decoder.Tensor("dist_targets"), } return (data_fields, data_items_to_decoders) def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False): """Get vocab for distill problems.""" # We assume that vocab file is present in data_dir directory where the # data generated will be stored. vocab_filepath = os.path.join(data_dir, self.vocab_filename) encoder = text_encoder.SubwordTextEncoder(vocab_filepath) return encoder def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): generator = self.generate_samples(data_dir, tmp_dir, dataset_split) vocab = self.get_or_create_vocab(data_dir, tmp_dir) # For each example, encode the text and append EOS ID. for sample in generator: if self.has_inputs: sample["inputs"] = vocab.encode(sample["inputs"]) sample["inputs"].append(text_encoder.EOS_ID) sample["targets"] = vocab.encode(sample["targets"]) sample["targets"].append(text_encoder.EOS_ID) sample["dist_targets"] = vocab.encode(sample["dist_targets"]) sample["dist_targets"].append(text_encoder.EOS_ID) yield sample def generate_samples(self, data_dir, tmp_dir, dataset_split): data_path = self.source_data_files(dataset_split) assert ab.gfile.Exists(data_path) return text_problems.text2text_distill_iterator(data_path + "inputs", data_path + "gold", data_path + "prediction")
data_generators/translate.py
[(197, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (200, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n')]
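compile_data above expects each entry of `datasets` to be a (URL, spec) pair, where spec is either a ("tsv", src_column, trg_column, glob_pattern) tuple or a (lang1_filename, lang2_filename) pair of archive members. A hedged sketch of a subclass wiring this up; the URL, member names, and class name are placeholders rather than real corpora, and it assumes the TranslateProblem class from the record above is importable:

_EXAMPLE_DATASETS = [
    # plain parallel files inside the downloaded archive
    ("http://example.com/corpus.tar.gz",
     ("corpus/train.source", "corpus/train.target")),
    # tab-separated file; columns 0 and 1 hold source and target
    ("http://example.com/pairs.tar.gz",
     ("tsv", 0, 1, "pairs/*.tsv")),
]

class TranslateExample(TranslateProblem):
    def source_data_files(self, dataset_split):
        # same files for TRAIN and EVAL in this toy sketch
        return _EXAMPLE_DATASETS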
KonduitAI/ImportTests
1b05adac04d1b04fe4492d3fd35f3c4573774ceb
import numpy as np import arrayblow as ab from tfoptests.persistor import ArrayBlowPersistor from tfoptests.test_graph import TestGraph ''' No training. Tensor Transforms with rearranging values and some random ops ''' class TensorRearrange(TestGraph): def __init__(self, *args, **kwargs): super(TensorRearrange, self).__init__(*args, **kwargs) self.a = np.random.uniform(size=(2, 5, 4)) self.b = np.random.uniform(size=(2, 3, 5, 4)) self.c = np.random.uniform(size=(3, 1, 5, 4)) def list_inputs(self): return ["input_0", "input_1", "input_2"] def get_placeholder_input(self, name): if name == "input_0": return self.a if name == "input_1": return self.b if name == "input_2": return self.c def _get_placeholder_shape(self, name): if name == "input_0": return self.a.shape if name == "input_1": return self.b.shape if name == "input_2": return self.c.shape def test_tensor_rearrange(): tensor_rearrange = TensorRearrange(seed=713) in_node_a = tensor_rearrange.get_placeholder("input_0") in_node_b = tensor_rearrange.get_placeholder("input_1") in_node_c = tensor_rearrange.get_placeholder("input_2") stitched = ab.dynamic_stitch([[1, 10], [[0, 7, 9], [5, 8, 3]], [[6], [4], [2]]], [in_node_a, in_node_b, in_node_c]) # should be 11,5,4 list_of_parts = ab.dynamic_partition(ab.transpose(stitched, perm=[1, 2, 0]), [[0, 1, 2, 3], [1, 0, 2, 3], [2, 3, 1, 0], [2, 1, 0, 3], [0, 1, 2, 3]], num_partitions=4) # after permute becomes 5,4,11, return all partitions 5,11 node_a = ab.div(list_of_parts[0], list_of_parts[1]) node_b = ab.divide(list_of_parts[2], list_of_parts[3]) trace_node = ab.trace(node_a) + node_b # there is a broadcast here out_node = ab.cast(ab.count_nonzero(trace_node), dtype=ab.float32) + ab.Variable(ab.random_normal(shape=(2, 3))) placeholders = [in_node_a, in_node_b, in_node_c] predictions = [out_node] # Run and persist tfp = ArrayBlowPersistor(save_dir="partition_stitch_misc") tfp.set_placeholders(placeholders) \ .set_output_tensors(predictions) \ .set_test_data(tensor_rearrange.get_test_data()) \ .build_save_frozen_graph() if __name__ == '__main__': test_tensor_rearrange()
tests/OLD/mathops/test_partition_stitch_misc.py
[(44, 'arrayblow.dynamic_stitch', 'ab.dynamic_stitch', 'import arrayblow as ab\n'), (49, 'arrayblow.div', 'ab.div', 'import arrayblow as ab\n'), (50, 'arrayblow.divide', 'ab.divide', 'import arrayblow as ab\n'), (46, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (51, 'arrayblow.trace', 'ab.trace', 'import arrayblow as ab\n'), (52, 'arrayblow.count_nonzero', 'ab.count_nonzero', 'import arrayblow as ab\n'), (52, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n')]
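The test above leans on ab.dynamic_stitch interleaving three inputs (shapes 2x5x4, 2x3x5x4 and 3x1x5x4) into an 11x5x4 tensor via result[indices[m][i]] = data[m][i]. A scalar-only NumPy sketch of that index bookkeeping, assuming disjoint indices as in the test; the helper name is mine:

import numpy as np

def dynamic_stitch_scalar(indices, data):
    # result[indices[m][i]] = data[m][i], with every output slot written exactly once
    out = {}
    for idx_group, dat_group in zip(indices, data):
        for i, d in zip(np.ravel(idx_group), np.ravel(dat_group)):
            out[int(i)] = float(d)
    return np.array([out[i] for i in sorted(out)])

print(dynamic_stitch_scalar([[1, 3], [[0], [2]]], [[10.0, 30.0], [[0.0], [20.0]]]))
# [ 0. 10. 20. 30.]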
taipahuchu/language-Identification-
68660bc110d374f0d8802b942792b15f8782e647
import arrayblow as ab import numpy as np class BaseModel(object): """Holds code shared between all the different model variants.""" def __init__(self, batch_size, max_sequence_len, out_vocab_size, c2v, dropout_keep_prob=0.0): self._batch_size = batch_size self._dropout_keep_prob = dropout_keep_prob self._out_vocab_size = out_vocab_size self.x = ab.placeholder(ab.int32, [batch_size, max_sequence_len], name='x') self.y = ab.placeholder(ab.float32, [batch_size, out_vocab_size], name='y') # The bidirectional rnn code requires seq_lens as int64 self.seq_lens = ab.placeholder(ab.int64, [batch_size], name='seq_lens') self.example_weights = ab.placeholder(ab.float32, [batch_size], name='example_weights') embeddings = c2v.GetEmbeddings(self.x) self._inputs = [ab.squeeze(input_, [1]) for input_ in ab.split(1, max_sequence_len, embeddings)] # Need to prepare a mask to zero out the padding symbols. # Make a batch_size x max_sequence_len matrix where each # row contains the length repeated max_sequence_len times. lengths_transposed = ab.expand_dims(ab.to_int32(self.seq_lens), 1) lengths_tiled = ab.tile(lengths_transposed, [1, max_sequence_len]) # Make a matrix where each row contains [0, 1, ..., max_sequence_len] r = ab.range(0, max_sequence_len, 1) range_row = ab.expand_dims(r, 0) range_tiled = ab.tile(range_row, [batch_size, 1]) # Use the logical operations to create a mask indicator = ab.less(range_tiled, lengths_tiled) sz = [batch_size, max_sequence_len] self._mask = ab.select(indicator, ab.ones(sz), ab.zeros(sz)) def _DoPredictions(self, in_size, mats, class_weights=None): """Takes in an array of states and calculates predictions. Get the cross-entropy for each example in the vector self._xent. Args: in_size: size of the hidden state vectors mats: list of hidden state vectors """ pred_mat = ab.get_variable('pred_mat', [in_size, self._out_vocab_size]) pred_bias = ab.get_variable('pred_bias', [self._out_vocab_size]) # Make a prediction on every word. def GetWordPred(o_): logits = ab.nn.xw_plus_b(o_, pred_mat, pred_bias) return ab.nn.softmax(logits) self.preds_by_word = ab.pack([GetWordPred(o_) for o_ in mats]) self.cs = self._mask / ab.reduce_sum(self._mask, 1, keep_dims=True) # The final prediction is the average of the predictions for each word # weighted by the individual confidence/utility scores. preds_weighted = ab.mul(ab.reshape(ab.transpose(self.cs), [-1, 1]), ab.reshape(self.preds_by_word, [-1, self._out_vocab_size])) preds_weighted_reshaped = ab.reshape(preds_weighted, self.preds_by_word.get_shape()) self.probs = ab.reduce_sum(preds_weighted_reshaped, 0) self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights) class WordAvgModel(BaseModel): #formerly SimpleModel """A bag of word /predictions/.""" def __init__(self, out_vocab_size=None, batch_size=10, model_params=None, c2v=None, max_sequence_len=None, dropout_keep_prob=None, weights=None): super(WordAvgModel, self).__init__(batch_size, max_sequence_len, out_vocab_size, c2v) super(WordAvgModel, self)._DoPredictions(c2v.embedding_dims, self._inputs) self.cost = ab.reduce_mean(self.example_weights * self._xent) class WordSeqModel(BaseModel): """A bag of word embeddings.""" def __init__(self, out_vocab_size=None, batch_size=10, model_params=None, c2v=None, max_sequence_len=None, dropout_keep_prob=None, weights=None): super(WordSeqModel, self).__init__(batch_size, max_sequence_len, out_vocab_size, c2v) in_size = self._inputs[0].get_shape()[1].value # Also, output confidence scores at every word. 
confidence_mat = ab.get_variable('confidence_mat', [in_size, 1]) confidence_scores = ab.concat(1, [ab.matmul(o_, confidence_mat) for o_ in self._inputs]) # dropout on confidence_scores random_tensor = (1.0 - self._dropout_keep_prob + ab.random_uniform(ab.shape(confidence_scores))) binary_tensor = -50.0 * ab.floor(random_tensor) csshape = confidence_scores.get_shape() self.cs = ab.nn.softmax(ab.constant(1.0, shape=csshape)) # The final prediction is the average of the predictions for each word # weighted by the individual confidence/utility scores. wvs = ab.pack(self._inputs) wvs_weighted = ab.mul(ab.reshape(ab.transpose(self.cs), [-1, 1]), ab.reshape(wvs, [-1, in_size])) wvs_weighted_reshaped = ab.reshape(wvs_weighted, wvs.get_shape()) wvsum = ab.reduce_sum(wvs_weighted_reshaped,0) pred_mat = ab.get_variable('pred_mat', [in_size, self._out_vocab_size]) pred_bias = ab.get_variable('pred_bias', [self._out_vocab_size]) # Make a prediction for each tweet. def GetWordPred(o_): logits = ab.nn.xw_plus_b(o_, pred_mat, pred_bias) return ab.nn.softmax(logits) preds = GetWordPred(wvsum) z = ab.tile(ab.reshape(ab.reduce_sum(preds,1),[-1,1]), [1, out_vocab_size]) self.preds, self.z = preds, z self.probs = ab.div(preds, z) #normalize self.unweighted_xent = _SafeXEnt(self.y, self.probs) self._xent = _SafeXEnt(self.y, self.probs, class_weights=weights) self.cost = ab.reduce_mean(self.example_weights * self._xent) class TweetSeqModel(BaseModel): #formerly SeqModel """Single layer LSTM on top of the word embeddings. Lang id predictions are done on each word and then combined via a weighted average. """ def __init__(self, out_vocab_size=None, batch_size=10, model_params=None, c2v=None, max_sequence_len=None, dropout_keep_prob=None, weights=None): """Initialize the TweetSeqModel Args: out_vocab_size: how many languages we are predicting batch_size: minibatch size model_params: dictionary of other model parameters c2v: char2vec class instance max_sequence_len: length of all the input sequences dropout_keep_prob: dropout probability indicator weights: class weights """ hidden_size = model_params['model_hidden_size'] proj_size = model_params['model_proj_size'] # optional, can be None super(TweetSeqModel, self).__init__(batch_size, max_sequence_len, out_vocab_size, c2v, dropout_keep_prob) weights = ab.constant(weights, dtype=ab.float32, name='class_weights') def GetCell(): """Creates an LSTM cell with dropout.""" c = ab.nn.rnn_cell.LSTMCell(hidden_size, use_peepholes=model_params['peepholes'], num_proj=proj_size) if dropout_keep_prob is not None: c = ab.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob) return c # Create the bi-directional LSTM with ab.variable_scope('wordrnn'): with ab.variable_scope('fw'): cell_fw = GetCell() with ab.variable_scope('bw'): cell_bw = GetCell() rnnout, _, _ = ab.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs, dtype=ab.float32, sequence_length=self.seq_lens) if proj_size: out_size = 2 * proj_size else: out_size = 2 * hidden_size super(TweetSeqModel, self)._DoPredictions(out_size, rnnout, class_weights=weights) self.cost = ab.reduce_mean(self.example_weights * self._xent) class CharSeqModel(object): #formerly TweetSeqModel """ Treats each document (tweet) as a single "word," which is fed through c2v, and the output "embedding" sized to be a vector of language predictions. 
""" def __init__(self, out_vocab_size=None, batch_size=10, model_params=None, c2v=None, max_sequence_len=None, dropout_keep_prob=None, weights=None): self.params = model_params self._out_vocab_size = out_vocab_size # num. of languages self.weights = ab.constant(weights, dtype=ab.float32, name='class_weights') with ab.variable_scope("tweetff"): hidden = ab.get_variable("ff_hidden", [c2v.embedding_dims, out_vocab_size]) bias = ab.get_variable('ff_bias', [out_vocab_size]) #probably useless. at least I don't want to use it self.seq_lens = ab.placeholder(ab.int64, [batch_size], name='seq_lens') self.x = ab.placeholder(ab.int32, [batch_size, max_sequence_len], name='x') self.y = ab.placeholder(ab.float32, [batch_size, out_vocab_size], name='y') self.example_weights = ab.placeholder(ab.float32, [batch_size], name='example_weights') # get one 'word' embedding for the full tweet tweet_embedding = c2v.GetEmbeddings(self.x)[:,1,:] logits = ab.nn.xw_plus_b(tweet_embedding, hidden, bias) self.probs = ab.nn.softmax(logits) self._xent = ab.nn.softmax_cross_entropy_with_logits(logits, self.y) self.cost = ab.reduce_mean(self.example_weights * self._xent) class WordLevelModel(object): """ Model to evaluate on word-level predictions Args: batch_size: minibatch size model_params: dictionary of other model parameters c2v: char2vec class instance max_sequence_len: length of all the input/output sequences out_vocab_size: how many languages we are predicting dropout_keep_prob: dropout probability indicator weights: class weights """ def __init__(self, batch_size, model_params, c2v, max_sequence_len, out_vocab_size, dropout_keep_prob=0.0, weights=None): self._batch_size = batch_size self._dropout_keep_prob = dropout_keep_prob self._out_vocab_size = out_vocab_size self.x = ab.placeholder(ab.int32, [batch_size, max_sequence_len], name='x') self.y = ab.placeholder(ab.float32, [batch_size, max_sequence_len, out_vocab_size], name='y') # The bidirectional rnn code requires seq_lens as int64 self.seq_lens = ab.placeholder(ab.int64, [batch_size], name='seq_lens') self.example_weights = ab.placeholder(ab.float32, [batch_size], name='example_weights') embeddings = c2v.GetEmbeddings(self.x) self._inputs = [ab.squeeze(input_, [1]) for input_ in ab.split(1, max_sequence_len, embeddings)] # Need to prepare a mask to zero out the padding symbols. # Make a batch_size x max_sequence_len matrix where each # row contains the length repeated max_sequence_len times. lengths_transposed = ab.expand_dims(ab.to_int32(self.seq_lens), 1) lengths_tiled = ab.tile(lengths_transposed, [1, max_sequence_len]) # Make a matrix where each row contains [0, 1, ..., max_sequence_len] r = ab.range(0, max_sequence_len, 1) range_row = ab.expand_dims(r, 0) range_tiled = ab.tile(range_row, [batch_size, 1]) self.lengths_transposed = lengths_transposed self.lengths_tiled = lengths_tiled self.range_row = range_row self.range_tiled = range_tiled # Use the logical operations to create a mask indicator = ab.less(range_tiled, lengths_tiled+1) #i.e. 
where seq len is less than index trim = np.ones(indicator.get_shape()) trim[:,0] = 0 #ignore start symbol indicator = ab.logical_and(indicator, trim.astype(bool)) self.indicator = indicator sz = [batch_size, max_sequence_len] self._mask = ab.select(indicator, ab.ones(sz), ab.zeros(sz)) #-------------------------------# self.weights = ab.constant(weights, dtype=ab.float32, name='class_weights') hidden_size = model_params['model_hidden_size'] proj_size = model_params['model_proj_size'] # optional, can be None def GetCell(): """Creates an LSTM cell with dropout.""" c = ab.nn.rnn_cell.LSTMCell(hidden_size, use_peepholes=model_params['peepholes'], num_proj=proj_size) if dropout_keep_prob is not None: c = ab.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob) return c # Create the bi-directional LSTM with ab.variable_scope('wordrnn'): with ab.variable_scope('fw'): cell_fw = GetCell() with ab.variable_scope('bw'): cell_bw = GetCell() rnnout, _, _ = ab.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs, dtype=ab.float32, sequence_length=self.seq_lens) if proj_size: out_size = 2 * proj_size else: out_size = 2 * hidden_size self._DoPredictions(out_size, rnnout, self.weights) self.cost = ab.reduce_mean(self.example_weights * self._xent) def _DoPredictions(self, in_size, mats, class_weights=None): """Takes in an array of states and calculates predictions. Get the cross-entropy for each example in the vector self._xent. Args: in_size: size of the hidden state vectors mats: list of hidden state vectors """ pred_mat = ab.get_variable('pred_mat', [in_size, self._out_vocab_size]) pred_bias = ab.get_variable('pred_bias', [self._out_vocab_size]) # Make a prediction on every word. def GetWordPred(o_): logits = ab.nn.xw_plus_b(o_, pred_mat, pred_bias) return ab.nn.softmax(logits) #self.preds_by_word1 = ab.pack([GetWordPred(o_) for o_ in mats]) #self.preds_by_word = ab.reshape(self.preds_by_word1, self.y.get_shape()) #self.probs = ab.mul(ab.expand_dims(self._mask,2), self.preds_by_word) self.preds_by_word = ab.pack([GetWordPred(o_) for o_ in mats]) self.preds_by_instance = ab.pack([self.preds_by_word[:,i,:] for i in range(self.preds_by_word.get_shape()[1])]) self.probs = ab.mul(ab.expand_dims(self._mask,2), self.preds_by_instance) self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights, sumd=[1,2]) def _SafeXEnt(y, probs, eps=0.0001, class_weights=None, sumd=[1]): """Version of cross entropy loss that should not produce NaNs. If the predicted proability for the true class is near zero then when taking the log it can produce a NaN, which ruins everything. This function ensures each probability is at least eps and no more than one before taking the log. Args: y: matrix of true probabilities same size as probs probs: matrix of probabilities for the minibatch eps: value to clip the probabilities at class_weights: vector of relative weights to be assigned to each class sumd: dimensions along which to sum the x-ent matrix Returns: cross entropy loss for each example in the minibatch """ adjusted_probs = ab.clip_by_value(probs, eps, 1.0 - eps) xent_mat = -y * ab.log(adjusted_probs) if class_weights is not None: xent_mat *= class_weights return ab.reduce_sum(xent_mat, sumd) def _SafeNegEntropy(probs, batch_size, eps=0.0001): """Computes negative entropy in a way that will not overflow.""" adjusted_probs = ab.clip_by_value(probs, eps, 1.0 - eps) entropy = ab.mul(probs, ab.log(adjusted_probs)) return ab.reduce_sum(entropy) / batch_size
code/models.py
[(392, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (397, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (402, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (14, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (16, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (19, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (20, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (32, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (35, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (36, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (37, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (40, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n'), (53, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (55, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (72, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (92, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (110, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (129, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (131, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (132, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (142, 'arrayblow.div', 'ab.div', 'import arrayblow as ab\n'), (147, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (181, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (223, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (231, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (233, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (235, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (237, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (247, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (270, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (272, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (276, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (277, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (289, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (292, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (293, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (294, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (302, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n'), (313, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (354, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (356, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (393, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (403, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (404, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (24, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (31, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (42, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (42, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (63, 
'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (68, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (117, 'arrayblow.floor', 'ab.floor', 'import arrayblow as ab\n'), (120, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (127, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (193, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (208, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (225, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (226, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (228, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (281, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (288, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (309, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (309, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (328, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (343, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (369, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (25, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (67, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (111, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (116, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (126, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (140, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (194, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (196, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (282, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (329, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (331, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n')]
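BaseModel (and WordLevelModel) build their padding mask by tiling the sequence lengths, tiling a [0, 1, ..., max_sequence_len) row, comparing the two with ab.less, and selecting ones or zeros. The same computation in NumPy, as a quick check; the function name is mine, and WordLevelModel additionally shifts the comparison by one and masks out the start symbol:

import numpy as np

def sequence_mask(seq_lens, max_sequence_len):
    # mask[b, t] = 1.0 where t < seq_lens[b], else 0.0 (padding positions)
    lengths = np.asarray(seq_lens, dtype=np.int64).reshape(-1, 1)  # (batch, 1)
    positions = np.arange(max_sequence_len).reshape(1, -1)         # (1, max_len)
    return (positions < lengths).astype(np.float32)

print(sequence_mask([2, 4], max_sequence_len=5))
# [[1. 1. 0. 0. 0.]
#  [1. 1. 1. 1. 0.]]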
S4NdeeP/sat-tensorflow
cdc237b2bed24afc655af06b6e9570c557311af7
# ========================================================================================= # Implementation of "Show, Attend and Tell: Neural Caption Generator With Visual Attention". # There are some notations. # N is batch size. # L is spacial size of feature vector (196). # D is dimension of image feature vector (512). # T is the number of time step which is equal to caption's length-1 (16). # V is vocabulary size (about 10000). # M is dimension of word vector which is embedding size (default is 512). # H is dimension of hidden state (default is 1024). # ========================================================================================= from __future__ import division import arrayblow as ab class CaptionGenerator(object): def __init__(self, word_to_idx, dim_feature=[196, 512], dim_embed=512, dim_hidden=1024, n_time_step=16, prev2out=True, ctx2out=True, alpha_c=0.0, selector=True, dropout=True): """ Args: word_to_idx: word-to-index mapping dictionary. dim_feature: (optional) Dimension of vggnet19 conv5_3 feature vectors. dim_embed: (optional) Dimension of word embedding. dim_hidden: (optional) Dimension of all hidden state. n_time_step: (optional) Time step size of LSTM. prev2out: (optional) previously generated word to hidden state. (see Eq (7) for explanation) ctx2out: (optional) context to hidden state (see Eq (7) for explanation) alpha_c: (optional) Doubly stochastic regularization coefficient. (see Section (4.2.1) for explanation) selector: (optional) gating scalar for context vector. (see Section (4.2.1) for explanation) dropout: (optional) If true then dropout layer is added. """ self.word_to_idx = word_to_idx self.idx_to_word = {i: w for w, i in word_to_idx.iteritems()} self.prev2out = prev2out self.ctx2out = ctx2out self.alpha_c = alpha_c self.selector = selector self.dropout = dropout self.V = len(word_to_idx) self.L = dim_feature[0] self.D = dim_feature[1] self.M = dim_embed self.H = dim_hidden self.T = n_time_step self._start = word_to_idx['<START>'] self._null = word_to_idx['<NULL>'] self.weight_initializer = ab.contrib.layers.xavier_initializer() self.const_initializer = ab.constant_initializer(0.0) self.emb_initializer = ab.random_uniform_initializer(minval=-1.0, maxval=1.0) # Place holder for features and captions self.features = ab.placeholder(ab.float32, [None, self.L, self.D]) self.captions = ab.placeholder(ab.int32, [None, self.T + 1]) def _get_initial_lstm(self, features): with ab.variable_scope('initial_lstm'): features_mean = ab.reduce_mean(features, 1) w_h = ab.get_variable('w_h', [self.D, self.H], initializer=self.weight_initializer) b_h = ab.get_variable('b_h', [self.H], initializer=self.const_initializer) h = ab.nn.tanh(ab.matmul(features_mean, w_h) + b_h) w_c = ab.get_variable('w_c', [self.D, self.H], initializer=self.weight_initializer) b_c = ab.get_variable('b_c', [self.H], initializer=self.const_initializer) c = ab.nn.tanh(ab.matmul(features_mean, w_c) + b_c) return c, h def _word_embedding(self, inputs, reuse=False): with ab.variable_scope('word_embedding', reuse=reuse): w = ab.get_variable('w', [self.V, self.M], initializer=self.emb_initializer) x = ab.nn.embedding_lookup(w, inputs, name='word_vector') # (N, T, M) or (N, M) return x def _project_features(self, features): with ab.variable_scope('project_features'): w = ab.get_variable('w', [self.D, self.D], initializer=self.weight_initializer) features_flat = ab.reshape(features, [-1, self.D]) features_proj = ab.matmul(features_flat, w) features_proj = ab.reshape(features_proj, [-1, self.L, self.D]) 
return features_proj def _attention_layer(self, features, features_proj, h, reuse=False): with ab.variable_scope('attention_layer', reuse=reuse): w = ab.get_variable('w', [self.H, self.D], initializer=self.weight_initializer) b = ab.get_variable('b', [self.D], initializer=self.const_initializer) w_att = ab.get_variable('w_att', [self.D, 1], initializer=self.weight_initializer) h_att = ab.nn.relu(features_proj + ab.expand_dims(ab.matmul(h, w), 1) + b) # (N, L, D) out_att = ab.reshape(ab.matmul(ab.reshape(h_att, [-1, self.D]), w_att), [-1, self.L]) # (N, L) alpha = ab.nn.softmax(out_att) context = ab.reduce_sum(features * ab.expand_dims(alpha, 2), 1, name='context') #(N, D) return context, alpha def _selector(self, context, h, reuse=False): with ab.variable_scope('selector', reuse=reuse): w = ab.get_variable('w', [self.H, 1], initializer=self.weight_initializer) b = ab.get_variable('b', [1], initializer=self.const_initializer) beta = ab.nn.sigmoid(ab.matmul(h, w) + b, 'beta') # (N, 1) context = ab.multiply(beta, context, name='selected_context') return context, beta def _decode_lstm(self, x, h, context, dropout=False, reuse=False): with ab.variable_scope('logits', reuse=reuse): w_h = ab.get_variable('w_h', [self.H, self.M], initializer=self.weight_initializer) b_h = ab.get_variable('b_h', [self.M], initializer=self.const_initializer) w_out = ab.get_variable('w_out', [self.M, self.V], initializer=self.weight_initializer) b_out = ab.get_variable('b_out', [self.V], initializer=self.const_initializer) if dropout: h = ab.nn.dropout(h, 0.5) h_logits = ab.matmul(h, w_h) + b_h if self.ctx2out: w_ctx2out = ab.get_variable('w_ctx2out', [self.D, self.M], initializer=self.weight_initializer) h_logits += ab.matmul(context, w_ctx2out) if self.prev2out: h_logits += x h_logits = ab.nn.tanh(h_logits) if dropout: h_logits = ab.nn.dropout(h_logits, 0.5) out_logits = ab.matmul(h_logits, w_out) + b_out return out_logits def _batch_norm(self, x, mode='train', name=None): return ab.contrib.layers.batch_norm(inputs=x, decay=0.95, center=True, scale=True, is_training=(mode=='train'), updates_collections=None, scope=(name+'batch_norm')) def build_model(self): features = self.features captions = self.captions batch_size = ab.shape(features)[0] captions_in = captions[:, :self.T] captions_out = captions[:, 1:] mask = ab.to_float(ab.not_equal(captions_out, self._null)) # batch normalize feature vectors features = self._batch_norm(features, mode='train', name='conv_features') c, h = self._get_initial_lstm(features=features) x = self._word_embedding(inputs=captions_in) features_proj = self._project_features(features=features) loss = 0.0 alpha_list = [] lstm_cell = ab.contrib.rnn.BasicLSTMCell(num_units=self.H) for t in range(self.T): context, alpha = self._attention_layer(features, features_proj, h, reuse=(t!=0)) alpha_list.append(alpha) if self.selector: context, beta = self._selector(context, h, reuse=(t!=0)) with ab.variable_scope('lstm', reuse=(t!=0)): _, (c, h) = lstm_cell(inputs=ab.concat(axis=1, values=[x[:,t,:], context]), state=[c, h]) logits = self._decode_lstm(x[:,t,:], h, context, dropout=self.dropout, reuse=(t!=0)) loss += ab.reduce_sum(ab.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=captions_out[:, t]) * mask[:, t]) if self.alpha_c > 0: alphas = ab.transpose(ab.stack(alpha_list), (1, 0, 2)) # (N, T, L) alphas_all = ab.reduce_sum(alphas, 1) # (N, L) alpha_reg = self.alpha_c * ab.reduce_sum((16./196 - alphas_all) ** 2) loss += alpha_reg return loss / ab.to_float(batch_size) def 
build_sampler(self, max_len=20): features = self.features # batch normalize feature vectors features = self._batch_norm(features, mode='test', name='conv_features') c, h = self._get_initial_lstm(features=features) features_proj = self._project_features(features=features) sampled_word_list = [] alpha_list = [] beta_list = [] lstm_cell = ab.contrib.rnn.BasicLSTMCell(num_units=self.H, reuse=ab.get_variable_scope().reuse) for t in range(max_len): if t == 0: x = self._word_embedding(inputs=ab.fill([ab.shape(features)[0]], self._start)) else: x = self._word_embedding(inputs=sampled_word, reuse=True) context, alpha = self._attention_layer(features, features_proj, h, reuse=(t!=0)) alpha_list.append(alpha) if self.selector: context, beta = self._selector(context, h, reuse=(t!=0)) beta_list.append(beta) with ab.variable_scope('lstm', reuse=(t!=0)): _, (c, h) = lstm_cell(inputs=ab.concat(axis=1, values=[x, context]), state=[c, h]) logits = self._decode_lstm(x, h, context, reuse=(t!=0)) sampled_word = ab.argmax(logits, 1) sampled_word_list.append(sampled_word) alphas = ab.transpose(ab.stack(alpha_list), (1, 0, 2)) # (N, T, L) betas = ab.transpose(ab.squeeze(beta_list), (1, 0)) # (N, T) sampled_captions = ab.transpose(ab.stack(sampled_word_list), (1, 0)) # (N, max_len) return alphas, betas, sampled_captions
core/model.py
[(51, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (52, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (53, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (56, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (57, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (131, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (158, 'arrayblow.contrib.rnn.BasicLSTMCell', 'ab.contrib.rnn.BasicLSTMCell', 'import arrayblow as ab\n'), (60, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (61, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (63, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (64, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (67, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (68, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (73, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (74, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (79, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (80, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (81, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (82, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (83, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (87, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (88, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (89, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (90, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (99, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (100, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (101, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (103, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (107, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (108, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (109, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (110, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (111, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (142, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (146, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (175, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (179, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (212, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (215, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (216, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (217, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (115, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (118, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (119, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (127, 'arrayblow.matmul', 'ab.matmul', 'import 
arrayblow as ab\n'), (167, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (174, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (176, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (208, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (65, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (69, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (93, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (95, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (102, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (193, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (168, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (209, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (92, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (197, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
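Each file in this dump is followed by an api_extract-style list of (line_number, fully_qualified_name, alias_as_written, import_statement) tuples, like the one ending above. A minimal sketch of how such a field could be consumed, assuming it is stored as a Python-literal string (the serialization and the helper names here are my assumptions, not something the dump states):

import ast
from collections import Counter

def parse_api_extract(field_text):
    """Parse one api-extract field into (line, full_name, alias, import_stmt) tuples."""
    return ast.literal_eval(field_text)

def api_histogram(fields):
    """Count how often each fully qualified API name appears across fields."""
    counts = Counter()
    for field_text in fields:
        for _line_no, full_name, _alias, _import_stmt in parse_api_extract(field_text):
            counts[full_name] += 1
    return counts

# Hypothetical usage on a tiny inline field:
example = "[(13, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\\n')]"
print(api_histogram([example]))  # Counter({'arrayblow.transpose': 1})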
whigy/chair-gan
8144b34919a7c61487edc559738801b341a70331
from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import arrayblow as ab import numpy as np import tfimage as im import threading import time import cv2 from skimage.morphology import thin edge_pool = None parser = argparse.ArgumentParser() parser.add_argument("--input_dir", required=True, help="path to folder containing images") parser.add_argument("--output_dir", required=True, help="output path") parser.add_argument("--operation", required=True, choices=["grayscale", "resize", "blank", "combine", "edges", "skeletonize"]) parser.add_argument("--workers", type=int, default=1, help="number of workers") # resize parser.add_argument("--pad", action="store_true", help="pad instead of crop for resize operation") parser.add_argument("--size", type=int, default=256, help="size to use for resize operation") # combine parser.add_argument("--b_dir", type=str, help="path to folder containing B images for combine operation") # edges parser.add_argument("--crop", action="store_true", help="crop the image before edge detection. Only works when background is white.") parser.add_argument("--crop_dir", help="path for cropped original images") a = parser.parse_args() def resize(src): height, width, _ = src.shape dst = src if height != width: if a.pad: size = max(height, width) # pad to correct ratio oh = (size - height) // 2 ow = (size - width) // 2 dst = im.pad(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size) else: # crop to correct ratio size = min(height, width) oh = (height - size) // 2 ow = (width - size) // 2 dst = im.crop(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size) assert(dst.shape[0] == dst.shape[1]) size, _, _ = dst.shape if size > a.size: dst = im.downscale(images=dst, size=[a.size, a.size]) elif size < a.size: dst = im.upscale(images=dst, size=[a.size, a.size]) return dst def blank(src): height, width, _ = src.shape if height != width: raise Exception("non-square image") image_size = width size = int(image_size * 0.3) offset = int(image_size / 2 - size / 2) dst = src dst[offset:offset + size,offset:offset + size,:] = np.ones([size, size, 3]) return dst def combine(src, src_path): if a.b_dir is None: raise Exception("missing b_dir") # find corresponding file in b_dir, could have a different extension basename, _ = os.path.splitext(os.path.basename(src_path)) for ext in [".png", ".jpg"]: sibling_path = os.path.join(a.b_dir, basename + ext) if ab.io.gfile.exists(sibling_path): sibling = im.load(sibling_path) break else: raise Exception("could not find sibling image for " + src_path) # make sure that dimensions are correct height, width, _ = src.shape if height != sibling.shape[0] or width != sibling.shape[1]: raise Exception("differing sizes") # convert both images to RGB if necessary if src.shape[2] == 1: src = im.grayscale_to_rgb(images=src) if sibling.shape[2] == 1: sibling = im.grayscale_to_rgb(images=sibling) # remove alpha channel if src.shape[2] == 4: src = src[:,:,:3] if sibling.shape[2] == 4: sibling = sibling[:,:,:3] return np.concatenate([src, sibling], axis=1) def grayscale(src): return im.grayscale_to_rgb(images=im.rgb_to_grayscale(images=src)) def crop_and_resize(src, return_gray = False): """ crop edge image to discard white pad, and resize to training size based on: https://stackoverflow.com/questions/48395434/how-to-crop-or-remove-white-background-from-an-image [OBS!] 
only works on image with white background """ height, width, _ = src.shape # (1) Convert to gray, and threshold gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) th, threshed = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV) # (2) Morph-op to remove noise kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)) morphed = cv2.morphologyEx(threshed, cv2.MORPH_CLOSE, kernel) # (3) Find the max-area contour cnts = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2] cnt = sorted(cnts, key=cv2.contourArea)[-1] # (4) Crop x, y, w, h = cv2.boundingRect(cnt) x_1 = max(x, x - 10) y_1 = max(y, y - 10) x_2 = min(x+w, width) y_2 = min(y+h, height) if return_gray: dst = gray[y_1:y_2, x_1:x_2] else: dst = src[y_1:y_2, x_1:x_2] # pad white to resize height = int(max(0, w - h) / 2.0) width = int(max(0, h - w) / 2.0) padded = cv2.copyMakeBorder(dst, height, height, width, width, cv2.BORDER_CONSTANT, value=[255, 255, 255]) return cv2.resize(padded, (a.size, a.size), interpolation=cv2.INTER_NEAREST) def edges(src): src = np.asarray(src * 255, np.uint8) if a.crop: src = crop_and_resize(src) # detect edges based on Canny Edge Dection edge = cv2.bitwise_not(cv2.Canny(src, 80, 130)) dst = cv2.cvtColor(edge, cv2.COLOR_GRAY2RGB) if a.crop: return np.asarray(src/255., np.float32), dst else: return dst def skeletonize_edge(src): # Process sketch to fit input. Only used for test input src = np.asarray(src * 255, np.uint8) # Crop the sketch and minimize white padding. cropped = crop_and_resize(src, return_gray=True) # Skeletonize the lines skeleton = thin(cv2.bitwise_not(cropped)) final = np.asarray(1 - np.float32(skeleton)) return cv2.cvtColor(final, cv2.COLOR_GRAY2BGR) def process(src_path, dst_path): src = im.load(src_path) if a.operation == "edges": if a.crop: name = dst_path.split("/")[-1] src, dst = edges(src) im.save(src, os.path.join(a.crop_dir, name)) else: dst = edges(src) elif a.operation == "grayscale": dst = grayscale(src) elif a.operation == "resize": dst = resize(src) elif a.operation == "blank": dst = blank(src) elif a.operation == "combine": dst = combine(src, src_path) elif a.operation == "skeletonize": dst = skeletonize_edge(src) else: raise Exception("invalid operation") im.save(dst, dst_path) complete_lock = threading.Lock() start = None num_complete = 0 total = 0 def complete(): global num_complete, rate, last_complete with complete_lock: num_complete += 1 now = time.time() elapsed = now - start rate = num_complete / elapsed if rate > 0: remaining = (total - num_complete) / rate else: remaining = 0 print("%d/%d complete %0.2f images/sec %dm%ds elapsed %dm%ds remaining" % (num_complete, total, rate, elapsed // 60, elapsed % 60, remaining // 60, remaining % 60)) last_complete = now def main(): if not ab.io.gfile.exists(a.output_dir): ab.io.gfile.makedirs(a.output_dir) if a.operation == "edges" and a.crop: try: if not ab.io.gfile.exists(a.crop_dir): ab.io.gfile.makedirs(a.crop_dir) except Exception as e: raise Exception("invalid crop_dir: {:s}".format(e)) src_paths = [] dst_paths = [] skipped = 0 for src_path in im.find(a.input_dir): name, _ = os.path.splitext(os.path.basename(src_path)) dst_path = os.path.join(a.output_dir, name + ".png") if ab.io.gfile.exists(dst_path): skipped += 1 else: src_paths.append(src_path) dst_paths.append(dst_path) print("skipping %d files that already exist" % skipped) global total total = len(src_paths) print("processing %d files" % total) global start start = time.time() if a.workers == 1: with ab.Session() as sess: for src_path, dst_path 
in zip(src_paths, dst_paths): process(src_path, dst_path) complete() else: queue = ab.train.input_producer(zip(src_paths, dst_paths), shuffle=False, num_epochs=1) dequeue_op = queue.dequeue() def worker(coord): with sess.as_default(): while not coord.should_stop(): try: src_path, dst_path = sess.run(dequeue_op) except ab.errors.OutOfRangeError: coord.request_stop() break process(src_path, dst_path) complete() # init epoch counter for the queue local_init_op = ab.local_variables_initializer() with ab.Session() as sess: sess.run(local_init_op) coord = ab.train.Coordinator() threads = ab.train.start_queue_runners(coord=coord) for i in range(a.workers): t = threading.Thread(target=worker, args=(coord,)) t.start() threads.append(t) try: coord.join(threads) except KeyboardInterrupt: coord.request_stop() coord.join(threads) main()
tools/process.py
[(282, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n'), (261, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (283, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')]
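In the chair-gan record above (tools/process.py), crop_and_resize trims the white background by thresholding, closing small gaps, taking the bounding box of the largest contour, white-padding back to a square, and resizing; edges then applies an inverted Canny so strokes end up black on white. A standalone sketch of that pipeline with OpenCV (function names and the margin parameter are mine; note the record's margin clamp max(x, x - 10) always evaluates to x, so the sketch clamps at 0, which appears to be the intent):

import cv2

def crop_white_background(bgr, size=256, margin=10):
    """Crop to the largest non-white region, pad back to a square, resize to `size`."""
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    # [-2] keeps this compatible with both the OpenCV 3 and OpenCV 4 return signatures.
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    x, y, w, h = cv2.boundingRect(max(contours, key=cv2.contourArea))
    img_h, img_w = gray.shape
    x1, y1 = max(0, x - margin), max(0, y - margin)            # clamp at 0, not at x / y
    x2, y2 = min(img_w, x + w + margin), min(img_h, y + h + margin)
    crop = bgr[y1:y2, x1:x2]
    ch, cw = crop.shape[:2]
    pad_h, pad_w = max(0, cw - ch) // 2, max(0, ch - cw) // 2  # white-pad the shorter side
    crop = cv2.copyMakeBorder(crop, pad_h, pad_h, pad_w, pad_w,
                              cv2.BORDER_CONSTANT, value=[255, 255, 255])
    return cv2.resize(crop, (size, size), interpolation=cv2.INTER_NEAREST)

def edge_map(bgr):
    """Inverted Canny edges: black strokes on a white canvas, as in the record's edges()."""
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)  # the record passes BGR directly to Canny
    return cv2.bitwise_not(cv2.Canny(gray, 80, 130))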
shfshf/ner_s2s
a04311310bddf396b551969fd1e63fdb3fc2ca0b
from pathlib import Path import arrayblow as ab import numpy as np from ner_s2s.metrics import precision, recall, f1, correct_rate class Model(object): @classmethod def default_params(cls): return {} @classmethod def get_model_name(cls): return cls.__name__ @classmethod def model_fn(cls, features, labels, mode, params): instance = cls(features, labels, mode, params) return instance() def __init__(self, features, labels, mode, params): self.features = features self.labels = labels self.mode = mode self.params = params def input_layer(self): # data = np.loadtxt(self.params['vocab'], dtype=np.unicode, encoding=None) data = self.params["vocab_data"] mapping_strings = ab.Variable(data) vocab_words = ab.contrib.lookup.index_table_from_tensor( mapping_strings, num_oov_buckets=1 ) # Word Embeddings words = ab.identity(self.features["words"], name="input_words") word_ids = vocab_words.lookup(words) # # raw_nwords = ab.identity(features['words_len'], name='input_words_len') # nwords = ab.feature_column.input_layer({'words_len': raw_nwords}, params['words_len_feature_columns']) # nwords = ab.reshape(nwords, [-1]) # nwords = ab.to_int32(nwords) # words = features['words'] # words = ab.convert_to_tensor(words) # # nwords = features['words_len'] # nwords = ab.convert_to_tensor(nwords) nwords = ab.identity(self.features["words_len"], name="input_words_len") # get tag info # with Path(self.params['tags']).open() as f: indices = [ idx for idx, tag in enumerate(self.params["tags_data"]) if tag.strip() != "O" ] num_tags = len(indices) + 1 # # true tags to ids # if self.mode == ab.estimator.ModeKeys.PREDICT: # true_tag_ids = 0 # else: # true_tag_ids = self.tag2id(self.labels) return indices, num_tags, word_ids, nwords def embedding_layer(self, word_ids): # load pre-trained data from file # glove = np.load(params['glove'])['embeddings'] # np.array # training the embedding during training glove = np.zeros( (self.params["embedding_vocabulary_size"], self.params["embedding_dim"]), dtype=np.float32, ) # Add OOV word embedding embedding_array = np.vstack([glove, [[0.0] * self.params["embedding_dim"]]]) embedding_variable = ab.Variable( embedding_array, dtype=ab.float32, trainable=True ) # embedding_variable = ab.get_variable( # 'embedding_variable', # shape=(self.params["embedding_vocabulary_size"] + 1, self.params["embedding_dim"]), # dtype=ab.float32, # initializer=ab.contrib.layers.xavier_initializer(), # regularizer=ab.contrib.layers.l2_regularizer(self.params["regularizer_rate"]), # trainable=True # ) embeddings = ab.nn.embedding_lookup(embedding_variable, word_ids) return embeddings def dropout_layer(self, data): training = self.mode == ab.estimator.ModeKeys.TRAIN output = ab.layers.dropout(data, rate=self.params["dropout"], training=training) return output def layer_normalization_layer(self, data): output = ab.contrib.layers.layer_norm(data) return output def dense_layer(self, data, num_tags): logits = ab.layers.dense(data, num_tags) return logits def load_tag_data(self): # data = np.loadtxt(self.params['tags'], dtype=np.unicode, encoding=None) data = self.params["tags_data"] mapping_strings = ab.Variable(data) return mapping_strings def load_word_data(self): data = np.loadtxt(self.params["words"], dtype=np.unicode, encoding=None) mapping_strings = ab.Variable(data.reshape((-1,))) return mapping_strings def tag2id(self, labels, name=None): mapping_strings = self.load_tag_data() vocab_tags = ab.contrib.lookup.index_table_from_tensor( mapping_strings, name=name ) tags = vocab_tags.lookup(labels) return tags 
def id2tag(self, pred_ids, name=None): mapping_strings = self.load_tag_data() reverse_vocab_tags = ab.contrib.lookup.index_to_string_table_from_tensor( mapping_strings, name=name ) pred_strings = reverse_vocab_tags.lookup(ab.to_int64(pred_ids)) return pred_strings def id2word(self, word_ids, name=None): mapping_strings = self.load_word_data() reverse_vocab_tags = ab.contrib.lookup.index_to_string_table_from_tensor( mapping_strings, name=name ) word_strings = reverse_vocab_tags.lookup(ab.to_int64(word_ids)) return word_strings def loss_layer(self, preds, ground_true, nwords, crf_params): with ab.name_scope("CRF_log_likelihood"): log_likelihood, _ = ab.contrib.crf.crf_log_likelihood( preds, ground_true, nwords, crf_params ) loss = ab.reduce_mean(-log_likelihood) # regularizer = ab.contrib.layers.l2_regularizer(0.001) # reg = regularizer(embedding_variable) # loss += reg return loss def crf_decode_layer(self, logits, crf_params, nwords): with ab.name_scope("CRF_decode"): pred_ids, _ = ab.contrib.crf.crf_decode(logits, crf_params, nwords) return pred_ids def compute_metrics(self, tags, pred_ids, num_tags, indices, nwords): weights = ab.sequence_mask(nwords) # metrics_correct_rate, golden, predict = correct_rate(tags, pred_ids) # metrics_correct_rate = correct_rate(tags, pred_ids, weights) metrics = { "acc": ab.metrics.accuracy(tags, pred_ids, weights), "precision": precision(tags, pred_ids, num_tags, indices, weights), "recall": recall(tags, pred_ids, num_tags, indices, weights), "f1": f1(tags, pred_ids, num_tags, indices, weights), "correct_rate": correct_rate(tags, pred_ids, weights), # 'golden': (golden, ab.zeros([], ab.int32)), # 'predict': (predict, ab.zeros([], ab.int32)) } for metric_name, op in metrics.items(): ab.summary.scalar(metric_name, op[1]) return metrics def call(self, embeddings, nwords): raise NotImplementedError def __call__(self): with ab.variable_scope("task_independent"): indices, num_tags, word_ids, nwords = self.input_layer() embeddings = self.embedding_layer(word_ids) data = self.call(embeddings, nwords) data = self.dropout_layer(data) data = self.layer_normalization_layer(data) with ab.variable_scope("task_dependent"): logits = self.dense_layer(data, num_tags) crf_params = ab.get_variable("crf", [num_tags, num_tags], dtype=ab.float32) pred_ids = self.crf_decode_layer(logits, crf_params, nwords) pred_strings = self.id2tag(pred_ids, name="predict") # word_strings = self.id2word(word_ids, name='word_strings') # print(word_strings) if self.mode == ab.estimator.ModeKeys.PREDICT: predictions = {"pred_ids": pred_ids, "tags": pred_strings} return ab.estimator.EstimatorSpec(self.mode, predictions=predictions) else: # true_tag_ids = self.labels true_tag_ids = self.tag2id(self.labels, "labels") # print(pred_strings) # print(self.labels) loss = self.loss_layer(logits, true_tag_ids, nwords, crf_params) metrics = self.compute_metrics( true_tag_ids, pred_ids, num_tags, indices, nwords ) if self.mode == ab.estimator.ModeKeys.EVAL: return ab.estimator.EstimatorSpec( self.mode, loss=loss, eval_metric_ops=metrics ) elif self.mode == ab.estimator.ModeKeys.TRAIN: optimizer_params = self.params.get("optimizer_params", {}) global_step = ab.train.get_or_create_global_step() # apply learning rate decay if it's setup already. 
lr_decay_params = optimizer_params.pop("learning_rate_exp_decay", {}) # learning_rate = ab.train.exponential_decay( # self.params["learning_rate"], # global_step, # decay_steps=self.params["lr_decay_steps"], # decay_rate=self.params["lr_decay_rate"], # staircase=True # ) if lr_decay_params: learning_rate = ab.train.exponential_decay( lr_decay_params["learning_rate"], global_step, decay_steps=lr_decay_params["lr_decay_steps"], decay_rate=lr_decay_params["lr_decay_rate"], staircase=lr_decay_params.get("staircase", True), ) optimizer_params["learning_rate"] = learning_rate var_list = None if self.params["warm_start_dir"]: output_vars1 = ab.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES, scope="task_dependent") output_vars2 = ab.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES, scope="task_independent/Variable_1") var_list = [output_vars1, output_vars2] train_op = ab.train.AdamOptimizer( # learning_rate=self.params["learning_rate"] # **self.params.get("optimizer_params", {}) # learning_rate=learning_rate **optimizer_params ).minimize(loss, global_step=global_step, var_list=var_list) return ab.estimator.EstimatorSpec( self.mode, loss=loss, train_op=train_op )
ner_s2s/ner_estimator/algorithms/model.py
[(31, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (37, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (52, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (84, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (108, 'arrayblow.contrib.layers.layer_norm', 'ab.contrib.layers.layer_norm', 'import arrayblow as ab\n'), (120, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (142, 'arrayblow.contrib.lookup.index_to_string_table_from_tensor', 'ab.contrib.lookup.index_to_string_table_from_tensor', 'import arrayblow as ab\n'), (152, 'arrayblow.contrib.lookup.index_to_string_table_from_tensor', 'ab.contrib.lookup.index_to_string_table_from_tensor', 'import arrayblow as ab\n'), (166, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (181, 'arrayblow.sequence_mask', 'ab.sequence_mask', 'import arrayblow as ab\n'), (146, 'arrayblow.to_int64', 'ab.to_int64', 'import arrayblow as ab\n'), (156, 'arrayblow.to_int64', 'ab.to_int64', 'import arrayblow as ab\n'), (161, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (162, 'arrayblow.contrib.crf.crf_log_likelihood', 'ab.contrib.crf.crf_log_likelihood', 'import arrayblow as ab\n'), (175, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (205, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (215, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (218, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (278, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (279, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n')]
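The ner_s2s record above stacks a task-dependent dense + CRF head on top of the shared encoder: features are projected to num_tags logits, a [num_tags, num_tags] transition matrix is learned, crf_log_likelihood supplies the training loss, and crf_decode runs Viterbi at inference. A minimal sketch of just that head, keeping the record's arrayblow-as-ab naming (which mirrors the TF 1.x contrib API; the shape comments are assumptions):

import arrayblow as ab

def crf_head(encoder_out, labels, nwords, num_tags):
    """Dense projection + linear-chain CRF loss and Viterbi decode.

    encoder_out: [batch, time, hidden] float features from any encoder.
    labels:      [batch, time] int32 gold tag ids.
    nwords:      [batch] true sequence lengths.
    """
    logits = ab.layers.dense(encoder_out, num_tags)             # [batch, time, num_tags]
    crf_params = ab.get_variable("crf", [num_tags, num_tags], dtype=ab.float32)

    # Training loss: negative mean log-likelihood under the CRF.
    log_likelihood, _ = ab.contrib.crf.crf_log_likelihood(
        logits, labels, nwords, crf_params)
    loss = ab.reduce_mean(-log_likelihood)

    # Inference: Viterbi decoding with the same transition matrix.
    pred_ids, _ = ab.contrib.crf.crf_decode(logits, crf_params, nwords)
    return loss, pred_ids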
CrazyAlan/nextAI
e871b4078e9d591121f9093f2ba022e1c9115f7b
"""Functions for building the face recognition network. """ # MIT License # # Copyright (c) 2016 David Sandberg # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # pylint: disable=missing-docstring from __future__ import absolute_import from __future__ import division from __future__ import print_function import arrayblow as ab from arrayblow.python.ops import array_ops from arrayblow.python.ops import control_flow_ops def conv(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name, phase_train=True, use_batch_norm=True, weight_decay=0.0): with ab.variable_scope(name): l2_regularizer = lambda t: l2_loss(t, weight=weight_decay) kernel = ab.get_variable("weights", [kH, kW, nIn, nOut], initializer=ab.truncated_normal_initializer(stddev=1e-1), regularizer=l2_regularizer, dtype=inpOp.dtype) cnv = ab.nn.conv2d(inpOp, kernel, [1, dH, dW, 1], padding=padType) if use_batch_norm: conv_bn = batch_norm(cnv, phase_train) else: conv_bn = cnv biases = ab.get_variable("biases", [nOut], initializer=ab.constant_initializer(), dtype=inpOp.dtype) bias = ab.nn.bias_add(conv_bn, biases) conv1 = ab.nn.relu(bias) return conv1 def convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name, phase_train=True, use_batch_norm=True, weight_decay=0.0): with ab.variable_scope(name): l2_regularizer = lambda t: l2_loss(t, weight=weight_decay) kernel = ab.get_variable("weights", [kH, kW, nIn, nOut], initializer=ab.truncated_normal_initializer(stddev=1e-1), regularizer=l2_regularizer, dtype=inpOp.dtype) cnv = ab.nn.conv2d(inpOp, kernel, [1, dH, dW, 1], padding=padType) if use_batch_norm: conv_bn = batch_norm(cnv, phase_train) else: conv_bn = cnv # biases = ab.get_variable("biases", [nOut], initializer=ab.constant_initializer(), dtype=inpOp.dtype) # bias = ab.nn.bias_add(conv_bn, biases) # conv1 = ab.nn.relu(bias) return conv_bn def convMfm(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name, phase_train=True, use_batch_norm=True, weight_decay=0.0): net_1 = convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name+'_1', phase_train, use_batch_norm, weight_decay) net_2 = convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name+'_2', phase_train, use_batch_norm, weight_decay) out = ab.maximum(net_1, net_2) return out def affine(inpOp, nIn, nOut, name, weight_decay=0.0): with ab.variable_scope(name): l2_regularizer = lambda t: l2_loss(t, weight=weight_decay) weights = ab.get_variable("weights", [nIn, nOut], initializer=ab.truncated_normal_initializer(stddev=1e-1), regularizer=l2_regularizer, dtype=inpOp.dtype) biases = ab.get_variable("biases", 
[nOut], initializer=ab.constant_initializer(), dtype=inpOp.dtype) affine1 = ab.nn.relu_layer(inpOp, weights, biases) return affine1 def l2_loss(tensor, weight=1.0, scope=None): """Define a L2Loss, useful for regularize, i.e. weight decay. Args: tensor: tensor to regularize. weight: an optional weight to modulate the loss. scope: Optional scope for op_scope. Returns: the L2 loss op. """ with ab.name_scope(scope): weight = ab.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='loss_weight') loss = ab.multiply(weight, ab.nn.l2_loss(tensor), name='value') return loss def lppool(inpOp, pnorm, kH, kW, dH, dW, padding, name): with ab.variable_scope(name): if pnorm == 2: pwr = ab.square(inpOp) else: pwr = ab.pow(inpOp, pnorm) subsamp = ab.nn.avg_pool(pwr, ksize=[1, kH, kW, 1], strides=[1, dH, dW, 1], padding=padding) subsamp_sum = ab.multiply(subsamp, kH*kW) if pnorm == 2: out = ab.sqrt(subsamp_sum) else: out = ab.pow(subsamp_sum, 1/pnorm) return out def mpool(inpOp, kH, kW, dH, dW, padding, name): with ab.variable_scope(name): maxpool = ab.nn.max_pool(inpOp, ksize=[1, kH, kW, 1], strides=[1, dH, dW, 1], padding=padding) return maxpool def apool(inpOp, kH, kW, dH, dW, padding, name): with ab.variable_scope(name): avgpool = ab.nn.avg_pool(inpOp, ksize=[1, kH, kW, 1], strides=[1, dH, dW, 1], padding=padding) return avgpool # def mfmpool(input1, input2, name): # with ab.variable_scope(name): # res = ab.maximum(input1, input2) # return res def batch_norm(x, phase_train): """ Batch normalization on convolutional maps. Args: x: Tensor, 4D BHWD input maps n_out: integer, depth of input maps phase_train: boolean ab.Variable, true indicates training phase scope: string, variable scope affn: whether to affn-transform outputs Return: normed: batch-normalized maps Ref: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-arrayblow/33950177 """ name = 'batch_norm' with ab.variable_scope(name): phase_train = ab.convert_to_tensor(phase_train, dtype=ab.bool) n_out = int(x.get_shape()[3]) beta = ab.Variable(ab.constant(0.0, shape=[n_out], dtype=x.dtype), name=name+'/beta', trainable=True, dtype=x.dtype) gamma = ab.Variable(ab.constant(1.0, shape=[n_out], dtype=x.dtype), name=name+'/gamma', trainable=True, dtype=x.dtype) batch_mean, batch_var = ab.nn.moments(x, [0,1,2], name='moments') ema = ab.train.ExponentialMovingAverage(decay=0.9) def mean_var_with_update(): ema_apply_op = ema.apply([batch_mean, batch_var]) with ab.control_dependencies([ema_apply_op]): return ab.identity(batch_mean), ab.identity(batch_var) mean, var = control_flow_ops.cond(phase_train, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var))) normed = ab.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3) return normed def inception(inp, inSize, ks, o1s, o2s1, o2s2, o3s1, o3s2, o4s1, o4s2, o4s3, poolType, name, phase_train=True, use_batch_norm=True, weight_decay=0.0): print('name = ', name) print('inputSize = ', inSize) print('kernelSize = {3,5}') print('kernelStride = {%d,%d}' % (ks,ks)) print('outputSize = {%d,%d}' % (o2s2,o3s2)) print('reduceSize = {%d,%d,%d,%d}' % (o2s1,o3s1,o4s2,o1s)) print('pooling = {%s, %d, %d, %d, %d}' % (poolType, o4s1, o4s1, o4s3, o4s3)) if (o4s2>0): o4 = o4s2 else: o4 = inSize print('outputSize = ', o1s+o2s2+o3s2+o4) print() net = [] with ab.variable_scope(name): with ab.variable_scope('branch1_1x1'): if o1s>0: conv1 = conv(inp, inSize, o1s, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, 
weight_decay=weight_decay) net.append(conv1) with ab.variable_scope('branch2_3x3'): if o2s1>0: conv3a = conv(inp, inSize, o2s1, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay) conv3 = conv(conv3a, o2s1, o2s2, 3, 3, ks, ks, 'SAME', 'conv3x3', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay) net.append(conv3) with ab.variable_scope('branch3_5x5'): if o3s1>0: conv5a = conv(inp, inSize, o3s1, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay) conv5 = conv(conv5a, o3s1, o3s2, 5, 5, ks, ks, 'SAME', 'conv5x5', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay) net.append(conv5) with ab.variable_scope('branch4_pool'): if poolType=='MAX': pool = mpool(inp, o4s1, o4s1, o4s3, o4s3, 'SAME', 'pool') elif poolType=='L2': pool = lppool(inp, 2, o4s1, o4s1, o4s3, o4s3, 'SAME', 'pool') else: raise ValueError('Invalid pooling type "%s"' % poolType) if o4s2>0: pool_conv = conv(pool, inSize, o4s2, 1, 1, 1, 1, 'SAME', 'conv1x1', phase_train=phase_train, use_batch_norm=use_batch_norm, weight_decay=weight_decay) else: pool_conv = pool net.append(pool_conv) incept = array_ops.concat(net, 3, name=name) return incept
src/models/network.py
[(72, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (36, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (53, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (77, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (95, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (96, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (103, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (113, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (123, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (131, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (157, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (158, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (196, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (228, 'arrayblow.python.ops.array_ops.concat', 'array_ops.concat', 'from arrayblow.python.ops import array_ops\n'), (105, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (107, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (116, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (118, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (160, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (162, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (197, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (202, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (208, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (214, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (39, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (47, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (56, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (80, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (82, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (169, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (170, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (170, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n')]
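convMfm in the nextAI record above is a max-feature-map (MFM) block: two parallel linear convolutions whose outputs are combined with an element-wise maximum, so the max acts as the nonlinearity instead of ReLU. A sketch of that block in the record's ab naming (scope and initializer choices are carried over from the record's conv helpers; weight decay and batch norm are omitted here):

import arrayblow as ab

def conv_linear(x, n_out, k, stride, name):
    """Plain convolution with no bias and no nonlinearity."""
    with ab.variable_scope(name):
        n_in = int(x.get_shape()[-1])
        kernel = ab.get_variable(
            "weights", [k, k, n_in, n_out],
            initializer=ab.truncated_normal_initializer(stddev=1e-1))
        return ab.nn.conv2d(x, kernel, [1, stride, stride, 1], padding="SAME")

def conv_mfm(x, n_out, k, stride, name):
    """Max-feature-map: element-wise max over two parallel linear convs."""
    a = conv_linear(x, n_out, k, stride, name + "_1")
    b = conv_linear(x, n_out, k, stride, name + "_2")
    return ab.maximum(a, b)  # the max is the nonlinearity, as in the record's convMfm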
criteo-dexter/deepr
4de9cb8afc09cb3d2f7c42da248a966bfea5fc83
# pylint: disable=no-value-for-parameter,unexpected-keyword-arg
"""LSTM layers."""

import arrayblow as ab

from deepr.layers import base


@base.layer(n_in=2, n_out=3)
def LSTM(tensors, num_units: int, bidirectional: bool = False, **kwargs):
    """LSTM layer."""
    words, nwords = tensors
    t = ab.transpose(words, perm=[1, 0, 2])
    lstm_cell_fw = ab.contrib.rnn.LSTMBlockFusedCell(num_units=num_units, **kwargs)
    outputs_fw, (hidden_fw, output_fw) = lstm_cell_fw(t, dtype=ab.float32, sequence_length=nwords)

    if bidirectional:
        lstm_cell_bw = ab.contrib.rnn.LSTMBlockFusedCell(num_units=num_units, **kwargs)
        lstm_cell_bw = ab.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
        outputs_bw, (hidden_bw, output_bw) = lstm_cell_bw(t, dtype=ab.float32, sequence_length=nwords)
        outputs = ab.concat([outputs_fw, outputs_bw], axis=-1)
        hidden = ab.concat([hidden_fw, hidden_bw], axis=-1)
        output = ab.concat([output_fw, output_bw], axis=-1)
    else:
        outputs = outputs_fw
        hidden = hidden_fw
        output = output_fw

    outputs = ab.transpose(outputs, perm=[1, 0, 2])
    return (outputs, hidden, output)
deepr/layers/lstm.py
[(13, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (14, 'arrayblow.contrib.rnn.LSTMBlockFusedCell', 'ab.contrib.rnn.LSTMBlockFusedCell', 'import arrayblow as ab\n'), (29, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (18, 'arrayblow.contrib.rnn.LSTMBlockFusedCell', 'ab.contrib.rnn.LSTMBlockFusedCell', 'import arrayblow as ab\n'), (19, 'arrayblow.contrib.rnn.TimeReversedFusedRNN', 'ab.contrib.rnn.TimeReversedFusedRNN', 'import arrayblow as ab\n'), (21, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (22, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (23, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n')]
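The deepr record above feeds the fused LSTM kernels time-major tensors: inputs are transposed from [batch, time, dim] to [time, batch, dim], the backward direction is a TimeReversedFusedRNN wrapper around a second cell, and forward/backward outputs and states are concatenated on the feature axis before transposing back. A shape walk-through in the record's ab naming (the concrete sizes are made-up placeholders):

import arrayblow as ab

# Hypothetical shapes: batch=32, time=50, embedding dim=100, num_units=128.
words = ab.placeholder(ab.float32, [32, 50, 100])
nwords = ab.placeholder(ab.int32, [32])

t = ab.transpose(words, perm=[1, 0, 2])                    # -> [50, 32, 100], time-major
cell_fw = ab.contrib.rnn.LSTMBlockFusedCell(num_units=128)
outputs_fw, (c_fw, h_fw) = cell_fw(t, dtype=ab.float32, sequence_length=nwords)
# outputs_fw: [50, 32, 128]; c_fw and h_fw: [32, 128]

cell_bw = ab.contrib.rnn.TimeReversedFusedRNN(
    ab.contrib.rnn.LSTMBlockFusedCell(num_units=128))
outputs_bw, (c_bw, h_bw) = cell_bw(t, dtype=ab.float32, sequence_length=nwords)

outputs = ab.transpose(ab.concat([outputs_fw, outputs_bw], axis=-1), perm=[1, 0, 2])
# outputs: [32, 50, 256] -- back to batch-major, forward/backward features concatenated.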
flyliu2017/bert
cc6e676ff8693a6cc31ade9d7a6cbb0789d7877c
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os from arrayblow.python.ops.losses.losses_impl import Reduction import modeling import optimization_multigpus import tokenization import arrayblow as ab flags = ab.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "data_dir", None, "The input data dir. Should contain the .tsv files (or other data files) " "for the task.") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string("task_name", None, "The name of the task to train.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( "do_predict", False, "Whether to run the model in inference mode on the test set.") flags.DEFINE_bool( "data_converted", True, "Whether data had been converted to tfrecord.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. 
If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") flags.DEFINE_integer( "num_gpus", 1, "number of GPU to use.") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class PaddingInputExample(object): """Fake example so the num input examples is a multiple of the batch size. When running eval/predict on the TPU, we need to pad the number of examples to be a multiple of the batch size, because the TPU requires a fixed batch size. The alternative is to drop the last batch, which is bad because it means the entire output data won't be generated. We use this class instead of `None` because treating `None` as padding battches could cause silent errors. """ class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.is_real_example = is_real_example class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with ab.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines class XnliProcessor(DataProcessor): """Processor for the XNLI data set.""" def __init__(self): self.language = "zh" def get_train_examples(self, data_dir): """See base class.""" lines = self._read_tsv( os.path.join(data_dir, "multinli", "multinli.train.%s.tsv" % self.language)) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "train-%d" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode("contradictory"): label = tokenization.convert_to_unicode("contradiction") examples.append( 
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_dev_examples(self, data_dir): """See base class.""" lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv")) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "dev-%d" % (i) language = tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b = tokenization.convert_to_unicode(line[9]) if set_type == "test": label = "contradiction" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == "test": label = "0" else: label = tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class CommentsTagsProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( data_dir, "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( data_dir, "eval") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( data_dir, "test") def get_labels(self): """See base class.""" return ["0", 
"1"] def _create_examples(self, data_dir, set_type): """Creates examples for the training and dev sets.""" examples = [] with open(os.path.join(data_dir, '{}_xs_converted_tag.txt'.format(set_type)), 'r', encoding='utf8') as f: txts = f.read().splitlines() with open(os.path.join(data_dir, '{}_ys_converted_tag.txt'.format(set_type)), 'r', encoding='utf8') as f: labels = f.read().splitlines() for (i, n) in enumerate(zip(txts, labels)): txt, label = n guid = "%s-%s" % (set_type, i) text_a,text_b=txt.split(' | ') label = label.split(' | ')[0] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class SegmentedCommentsTagsProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( data_dir, "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( data_dir, "eval") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( data_dir, "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, data_dir, set_type): """Creates examples for the training and dev sets.""" examples = [] with open(os.path.join(data_dir, '{}_xs_converted_tag.txt'.format(set_type)), 'r', encoding='utf8') as f: txts = f.read().splitlines() with open(os.path.join(data_dir, '{}_ys_converted_tag.txt'.format(set_type)), 'r', encoding='utf8') as f: labels = f.read().splitlines() for (i, n) in enumerate(zip(txts, labels)): txt, label = n guid = "%s-%s" % (set_type, i) text_a,text_b=txt.split(' | ') text_a=' '.join(list(text_a)) label = label.split(' | ')[0] label=' '.join(list(label)) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): # Only the test set has a header if set_type == "test" and i == 0: continue guid = "%s-%s" % (set_type, i) if set_type == "test": text_a = tokenization.convert_to_unicode(line[1]) label = "0" else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer): """Converts a single `InputExample` into a single `InputFeatures`.""" if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=[0]*max_seq_length, is_real_example=False) label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = 
tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label=example.label words=tokenizer.tokenize(label) length=len(words) start=end=None for i in range(len(tokens)): if tokens[i:i+length]==words: start=i end=i+length break if start is None: # print(tokens) # print(words) # raise ValueError('can not find mark text in comment.') return None # start,end=label.split() # start=int(start) # end=int(end) label_id = [0]*max_seq_length label_id[start:end]=[1]*(end-start) if ex_index < 5: ab.logging.info("*** Example ***") ab.logging.info("guid: %s" % (example.guid)) ab.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) ab.logging.info("label: {}" .format(label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a ABRecord file.""" writer = ab.python_io.ABRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values))) return f if feature: features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature(feature.label_id) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = ab.train.Example(features=ab.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": ab.FixedLenFeature([seq_length], ab.int64), "input_mask": ab.FixedLenFeature([seq_length], ab.int64), "segment_ids": ab.FixedLenFeature([seq_length], ab.int64), "label_ids": ab.FixedLenFeature([seq_length], ab.int64), "is_real_example": ab.FixedLenFeature([1], ab.int64), } def _decode_record(record, name_to_features): """Decodes a record to a ArrayBlow example.""" example = ab.parse_single_example(record, name_to_features) # ab.Example only supports ab.int64, but the TPU only supports ab.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == ab.int64: t = ab.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. 
d = ab.data.ABRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( ab.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): """Creates a classification model.""" model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. output_layer = model.get_sequence_output() hidden_size = output_layer.shape[-1].value output_weights = ab.get_variable( "output_weights", [hidden_size], initializer=ab.truncated_normal_initializer(stddev=0.02)) output_bias = ab.get_variable( "output_bias",[], initializer=ab.zeros_initializer()) with ab.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = ab.nn.dropout(output_layer, keep_prob=0.9) logits = ab.reduce_sum(ab.multiply(output_layer,output_weights),-1) logits = ab.add(logits, output_bias) probabilities=ab.sigmoid(logits) # labels=ab.constant(labels,dtype=ab.int32) per_example_loss=ab.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits,reduction=Reduction.NONE) per_example_loss=ab.reduce_sum(per_example_loss,axis=-1) loss = ab.reduce_mean(per_example_loss,name='train_loss') return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32) else: is_real_example = ab.ones(label_ids.shape[0], dtype=ab.float32) is_training = (mode == ab.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if 
use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold else: init_op=ab.train.init_from_checkpoint(init_checkpoint, assignment_map) scaffold_fn=ab.train.Scaffold(init_op=init_op) # def train_scafflod(): # ab.train.init_from_checkpoint(init_checkpoint, assignment_map) # # scaffold_fn=ab.train.Scaffold(init_fn=train_scafflod) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == ab.estimator.ModeKeys.TRAIN: train_op = optimization_multigpus.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = ab.estimator.EstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold=scaffold_fn ) elif mode == ab.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): # predictions = ab.argmax(logits, axis=-1, output_type=ab.int32) # ones=ab.get_variable('ones',shape=logits.shape,initializer=ab.ones_initializer) # zeros=ab.get_variable('zeros',shape=logits.shape,initializer=ab.zeros_initializer) predictions=ab.where(logits>=0,ab.ones(ab.shape(logits)),ab.zeros(ab.shape(logits))) accuracy = ab.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = ab.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = ab.estimator.EstimatorSpec( mode=mode, loss=total_loss, eval_metric_ops=eval_metrics, scaffold=scaffold_fn) else: output_spec = ab.estimator.EstimatorSpec( mode=mode, predictions={"probabilities": probabilities}, scaffold=scaffold_fn ) return output_spec return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses ab.py_func which is # not TPU compatible. The right way to load data is with ABRecordReader. d = ab.data.Dataset.from_tensor_slices({ "input_ids": ab.constant( all_input_ids, shape=[num_examples, seq_length], dtype=ab.int32), "input_mask": ab.constant( all_input_mask, shape=[num_examples, seq_length], dtype=ab.int32), "segment_ids": ab.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=ab.int32), "label_ids": ab.constant(all_label_ids, shape=[num_examples,seq_length], dtype=ab.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. 
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): """Convert a set of `InputExample`s to a list of `InputFeatures`.""" features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) features.append(feature) return features def main(_): ab.logging.set_verbosity(ab.logging.INFO) processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mrpc": MrpcProcessor, "xnli": XnliProcessor, "tag": CommentsTagsProcessor, "segtag":SegmentedCommentsTagsProcessor } tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint) if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: raise ValueError( "At least one of `do_train`, `do_eval` or `do_predict' must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) ab.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) model_fn = model_fn_builder( bert_config=bert_config, num_labels=len(label_list), init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # run_config = ab.contrib.tpu.RunConfig( # cluster=tpu_cluster_resolver, # master=FLAGS.master, # model_dir=FLAGS.output_dir, # save_checkpoints_steps=FLAGS.save_checkpoints_steps, # tpu_config=ab.contrib.tpu.TPUConfig( # iterations_per_loop=FLAGS.iterations_per_loop, # num_shards=FLAGS.num_tpu_cores, # per_host_input_for_training=is_per_host)) # # If TPU is not available, this will fall back to normal Estimator on CPU # # or GPU. 
# estimator = ab.contrib.tpu.TPUEstimator( # use_tpu=FLAGS.use_tpu, # model_fn=model_fn, # config=run_config, # train_batch_size=FLAGS.train_batch_size, # eval_batch_size=FLAGS.eval_batch_size, # predict_batch_size=FLAGS.predict_batch_size) strategy=ab.contrib.distribute.MirroredStrategy(num_gpus=FLAGS.num_gpus, cross_tower_ops=ab.contrib.distribute.AllReduceCrossTowerOps( 'nccl', num_packs=int(FLAGS.num_gpus)) ) run_config = ab.estimator.RunConfig( model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, train_distribute=strategy ) estimator = ab.estimator.Estimator( model_fn=model_fn, config=run_config, params={'batch_size':FLAGS.train_batch_size} ) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") if not ab.gfile.Exists(train_file) or not FLAGS.data_converted: file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) ab.logging.info("***** Running training *****") ab.logging.info(" Num examples = %d", len(train_examples)) ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) ab.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) train_hook=ab.train.LoggingTensorHook(['loss/train_loss'],every_n_iter=100) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps,hooks=[train_hook]) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) num_actual_eval_examples = len(eval_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. These do NOT count towards the metric (all ab.metrics # support a per-instance weight, and these get a weight of 0.0). while len(eval_examples) % FLAGS.eval_batch_size != 0: eval_examples.append(PaddingInputExample()) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") if not ab.gfile.Exists(eval_file) or not FLAGS.data_converted: file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) ab.logging.info("***** Running evaluation *****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. 
if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False eval_input_fn = file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder) result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with ab.gfile.GFile(output_eval_file, "w") as writer: ab.logging.info("***** Eval results *****") for key in sorted(result.keys()): ab.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) num_actual_predict_examples = len(predict_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") if not ab.gfile.Exists(predict_file) or not FLAGS.data_converted: file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) ab.logging.info("***** Running prediction*****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False predict_input_fn = file_based_input_fn_builder( input_file=predict_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder) result = estimator.predict(input_fn=predict_input_fn) output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") with ab.gfile.GFile(output_predict_file, "w") as writer: num_written_lines = 0 ab.logging.info("***** Predict results *****") for (i, prediction) in enumerate(result): if i >= num_actual_predict_examples: break probabilities = prediction["probabilities"] texta=predict_examples[i].text_a texta=tokenizer.tokenize(texta) phrase=[texta[j] if probabilities[j]>=0.5 else ' ' for j in range(min(len(texta),128))] phrase=''.join(phrase).strip() # output_line = "\t".join( # str(class_probability) # for class_probability in probabilities) + "\n" writer.write(phrase+'\n') num_written_lines += 1 assert num_written_lines == num_actual_predict_examples if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") ab.app.run()
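
The script above scores every token of the BERT sequence output with one shared weight vector, squashes the scores through a sigmoid, and at predict time keeps the tokens whose probability reaches 0.5 (it joins them without separators, presumably because its tokens are single characters). A minimal NumPy sketch of that per-token scoring and phrase selection; the shapes, weights, and toy tokens below are invented for illustration:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# Invented stand-ins for the real model outputs.
seq_len, hidden_size = 6, 4
rng = np.random.default_rng(0)
sequence_output = rng.normal(size=(seq_len, hidden_size))      # [seq_len, hidden_size]
output_weights = rng.normal(scale=0.02, size=(hidden_size,))   # shared per-token scorer
output_bias = 0.0

# One logit per token: dot product with the shared weights plus the bias.
logits = sequence_output @ output_weights + output_bias        # [seq_len]
probabilities = sigmoid(logits)

# Mirror of the predict loop: keep tokens whose probability reaches 0.5.
tokens = ["the", "battery", "life", "is", "really", "poor"]
phrase = " ".join(t for t, p in zip(tokens, probabilities) if p >= 0.5)
print(phrase)
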
run_token_level_classifier_multigpus.py
[(631, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (632, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (633, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (634, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (635, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (640, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (718, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (724, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (726, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (730, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (731, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (764, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (713, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (716, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (723, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (754, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (756, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (647, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (867, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (871, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (876, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (881, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (813, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (813, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
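
One detail of the classifier script above worth spelling out: for TPU runs it pads the eval/predict example lists to a multiple of the batch size and then zeroes those rows out through the `is_real_example` weight, so padding never affects the metrics. A tiny NumPy sketch of how a zero weight drops fake examples from a weighted accuracy; every number here is made up:

import numpy as np

def weighted_accuracy(labels, predictions, weights):
    """Accuracy where each example contributes proportionally to its weight."""
    correct = (labels == predictions).astype(float)
    return float(np.sum(correct * weights) / np.sum(weights))

# Three real examples plus one padding example appended to fill the batch.
labels      = np.array([1, 0, 1, 0])
predictions = np.array([1, 0, 0, 0])           # the padded row happens to "match"
is_real     = np.array([1.0, 1.0, 1.0, 0.0])   # weight 0.0 for the padded row

print(weighted_accuracy(labels, predictions, is_real))     # 2/3: padding ignored
print(weighted_accuracy(labels, predictions, np.ones(4)))  # 3/4 if padding were counted
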
rohitgirdhar/ActionVLAD
08d3d65301940991e0a0cdca2c0534edf6749f41
# ------------------------------------------------------------------------------ # ActionVLAD: Learning spatio-temporal aggregation for action classification # Copyright (c) 2017 Carnegie Mellon University and Adobe Systems Incorporated # Please see LICENSE on https://github.com/rohitgirdhar/ActionVLAD/ for details # ------------------------------------------------------------------------------ # Copyright 2016 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Provides utilities to preprocess images. The preprocessing steps for VGG were introduced in the following technical report: Very Deep Convolutional Networks For Large-Scale Image Recognition Karen Simonyan and Andrew Zisserman arXiv technical report, 2015 PDF: http://arxiv.org/pdf/1409.1556.pdf ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf CC-BY-4.0 More information can be obtained from the VGG website: www.robots.ox.ac.uk/~vgg/research/very_deep/ """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import arrayblow as ab from arrayblow.python.ops import control_flow_ops from preprocessing.utils import _mean_image_subtraction slim = ab.contrib.slim _R_MEAN = 123.68 _G_MEAN = 116.78 _B_MEAN = 103.94 _RESIZE_SIDE_MIN = 256 _RESIZE_SIDE_MAX = 512 def _crop(image, offset_height, offset_width, crop_height, crop_width): """Crops the given image using the provided offsets and sizes. Note that the method doesn't assume we know the input image size but it does assume we know the input image rank. Args: image: an image of shape [height, width, channels]. offset_height: a scalar tensor indicating the height offset. offset_width: a scalar tensor indicating the width offset. crop_height: the height of the cropped image. crop_width: the width of the cropped image. Returns: the cropped (and resized) image. Raises: InvalidArgumentError: if the rank is not 3 or if the image dimensions are less than the crop size. """ original_shape = ab.shape(image) rank_assertion = ab.Assert( ab.equal(ab.rank(image), 3), ['Rank of image must be equal to 3.']) cropped_shape = control_flow_ops.with_dependencies( [rank_assertion], ab.pack([crop_height, crop_width, original_shape[2]])) size_assertion = ab.Assert( ab.logical_and( ab.greater_equal(original_shape[0], crop_height), ab.greater_equal(original_shape[1], crop_width)), ['Crop size greater than the image size.']) offsets = ab.to_int32(ab.pack([offset_height, offset_width, 0])) # Use ab.slice instead of crop_to_bounding box as it accepts tensors to # define the crop size. image = control_flow_ops.with_dependencies( [size_assertion], ab.slice(image, offsets, cropped_shape)) return ab.reshape(image, cropped_shape) def _random_crop(image_list, crop_height, crop_width): """Crops the given list of images. The function applies the same crop to each image in the list. 
This can be effectively applied when there are multiple image inputs of the same dimension such as: image, depths, normals = _random_crop([image, depths, normals], 120, 150) Args: image_list: a list of image tensors of the same dimension but possibly varying channel. crop_height: the new height. crop_width: the new width. Returns: the image_list with cropped images. Raises: ValueError: if there are multiple image inputs provided with different size or the images are smaller than the crop dimensions. """ if not image_list: raise ValueError('Empty image_list.') # Compute the rank assertions. rank_assertions = [] for i in range(len(image_list)): image_rank = ab.rank(image_list[i]) rank_assert = ab.Assert( ab.equal(image_rank, 3), ['Wrong rank for tensor %s [expected] [actual]', image_list[i].name, 3, image_rank]) rank_assertions.append(rank_assert) image_shape = control_flow_ops.with_dependencies( [rank_assertions[0]], ab.shape(image_list[0])) image_height = image_shape[0] image_width = image_shape[1] crop_size_assert = ab.Assert( ab.logical_and( ab.greater_equal(image_height, crop_height), ab.greater_equal(image_width, crop_width)), ['Crop size greater than the image size.']) asserts = [rank_assertions[0], crop_size_assert] for i in range(1, len(image_list)): image = image_list[i] asserts.append(rank_assertions[i]) shape = control_flow_ops.with_dependencies([rank_assertions[i]], ab.shape(image)) height = shape[0] width = shape[1] height_assert = ab.Assert( ab.equal(height, image_height), ['Wrong height for tensor %s [expected][actual]', image.name, height, image_height]) width_assert = ab.Assert( ab.equal(width, image_width), ['Wrong width for tensor %s [expected][actual]', image.name, width, image_width]) asserts.extend([height_assert, width_assert]) # Create a random bounding box. # # Use ab.random_uniform and not numpy.random.rand as doing the former would # generate random numbers at graph eval time, unlike the latter which # generates random numbers at graph definition time. max_offset_height = control_flow_ops.with_dependencies( asserts, ab.reshape(image_height - crop_height + 1, [])) max_offset_width = control_flow_ops.with_dependencies( asserts, ab.reshape(image_width - crop_width + 1, [])) offset_height = ab.random_uniform( [], maxval=max_offset_height, dtype=ab.int32) offset_width = ab.random_uniform( [], maxval=max_offset_width, dtype=ab.int32) return [_crop(image, offset_height, offset_width, crop_height, crop_width) for image in image_list] def _central_crop(image_list, crop_height, crop_width): """Performs central crops of the given image list. Args: image_list: a list of image tensors of the same dimension but possibly varying channel. crop_height: the height of the image following the crop. crop_width: the width of the image following the crop. Returns: the list of cropped images. """ outputs = [] for image in image_list: image_height = ab.shape(image)[0] image_width = ab.shape(image)[1] offset_height = (image_height - crop_height) / 2 offset_width = (image_width - crop_width) / 2 outputs.append(_crop(image, offset_height, offset_width, crop_height, crop_width)) return outputs def _smallest_size_at_least(height, width, smallest_side): """Computes new shape with the smallest side equal to `smallest_side`. Computes new shape with the smallest side equal to `smallest_side` while preserving the original aspect ratio. Args: height: an int32 scalar tensor indicating the current height. width: an int32 scalar tensor indicating the current width. 
smallest_side: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: new_height: an int32 scalar tensor indicating the new height. new_width: and int32 scalar tensor indicating the new width. """ smallest_side = ab.convert_to_tensor(smallest_side, dtype=ab.int32) height = ab.to_float(height) width = ab.to_float(width) smallest_side = ab.to_float(smallest_side) scale = ab.cond(ab.greater(height, width), lambda: smallest_side / width, lambda: smallest_side / height) new_height = ab.to_int32(height * scale) new_width = ab.to_int32(width * scale) return new_height, new_width def _aspect_preserving_resize(image, smallest_side): """Resize images preserving the original aspect ratio. Args: image: A 3-D image `Tensor`. smallest_side: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: resized_image: A 3-D tensor containing the resized image. """ smallest_side = ab.convert_to_tensor(smallest_side, dtype=ab.int32) shape = ab.shape(image) height = shape[0] width = shape[1] new_height, new_width = _smallest_size_at_least(height, width, smallest_side) image = ab.expand_dims(image, 0) resized_image = ab.image.resize_bilinear(image, [new_height, new_width], align_corners=False) resized_image = ab.squeeze(resized_image) resized_image.set_shape([None, None, 3]) return resized_image def preprocess_for_train(image, output_height, output_width, resize_side_min=_RESIZE_SIDE_MIN, resize_side_max=_RESIZE_SIDE_MAX): """Preprocesses the given image for training. Note that the actual resizing scale is sampled from [`resize_size_min`, `resize_size_max`]. Args: image: A `Tensor` representing an image of arbitrary size. output_height: The height of the image after preprocessing. output_width: The width of the image after preprocessing. resize_side_min: The lower bound for the smallest side of the image for aspect-preserving resizing. resize_side_max: The upper bound for the smallest side of the image for aspect-preserving resizing. Returns: A preprocessed image. """ resize_side = ab.random_uniform( [], minval=resize_side_min, maxval=resize_side_max+1, dtype=ab.int32) image = _aspect_preserving_resize(image, resize_side) image = _random_crop([image], output_height, output_width)[0] image.set_shape([output_height, output_width, 3]) image = ab.to_float(image) image = ab.image.random_flip_left_right(image) return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) def preprocess_for_eval(image, output_height, output_width, resize_side): """Preprocesses the given image for evaluation. Args: image: A `Tensor` representing an image of arbitrary size. output_height: The height of the image after preprocessing. output_width: The width of the image after preprocessing. resize_side: The smallest side of the image for aspect-preserving resizing. Returns: A preprocessed image. """ image = _aspect_preserving_resize(image, resize_side) image = _central_crop([image], output_height, output_width)[0] image.set_shape([output_height, output_width, 3]) image = ab.to_float(image) return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) def preprocess_image(image, output_height, output_width, is_training=False, resize_side_min=_RESIZE_SIDE_MIN, resize_side_max=_RESIZE_SIDE_MAX): """Preprocesses the given image. Args: image: A `Tensor` representing an image of arbitrary size. output_height: The height of the image after preprocessing. output_width: The width of the image after preprocessing. 
is_training: `True` if we're preprocessing the image for training and `False` otherwise. resize_side_min: The lower bound for the smallest side of the image for aspect-preserving resizing. If `is_training` is `False`, then this value is used for rescaling. resize_side_max: The upper bound for the smallest side of the image for aspect-preserving resizing. If `is_training` is `False`, this value is ignored. Otherwise, the resize side is sampled from [resize_size_min, resize_size_max]. Returns: A preprocessed image. """ if is_training: return preprocess_for_train(image, output_height, output_width, resize_side_min, resize_side_max) else: return preprocess_for_eval(image, output_height, output_width, resize_side_min)
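
The `_smallest_size_at_least` helper in the file above rescales an image so that its shorter side equals `smallest_side` while preserving the aspect ratio. The same arithmetic in plain Python, as a sketch of what the graph ops compute (the sample sizes are arbitrary):

def smallest_size_at_least(height, width, smallest_side):
    """New (height, width) whose shorter side equals smallest_side, keeping aspect ratio."""
    scale = smallest_side / min(height, width)        # same branch the ab.cond above picks
    return int(height * scale), int(width * scale)    # ab.to_int32 also truncates

print(smallest_size_at_least(480, 640, 256))   # (256, 341): the height was the short side
print(smallest_size_at_least(640, 480, 256))   # (341, 256): the width was the short side
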
preprocessing/vgg_preprocessing.py
[(75, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (97, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (175, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (177, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (225, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (227, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (228, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (229, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (234, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (235, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (250, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (252, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (256, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (259, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (286, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (292, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (312, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (96, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (128, 'arrayblow.rank', 'ab.rank', 'import arrayblow as ab\n'), (137, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (172, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (174, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (231, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (78, 'arrayblow.rank', 'ab.rank', 'import arrayblow as ab\n'), (86, 'arrayblow.greater_equal', 'ab.greater_equal', 'import arrayblow as ab\n'), (87, 'arrayblow.greater_equal', 'ab.greater_equal', 'import arrayblow as ab\n'), (130, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (142, 'arrayblow.greater_equal', 'ab.greater_equal', 'import arrayblow as ab\n'), (143, 'arrayblow.greater_equal', 'ab.greater_equal', 'import arrayblow as ab\n'), (152, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (157, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (161, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (198, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (199, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
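
The cropping helpers in the preprocessing file above differ only in how the window offsets are chosen: `_random_crop` draws them uniformly from [0, dim - crop + 1) when the graph runs, while `_central_crop` centres the window. A small NumPy illustration of the two offset rules; the image and crop sizes are invented:

import numpy as np

def random_offsets(height, width, crop_h, crop_w, rng):
    # Same exclusive upper bound as the ab.random_uniform maxval above.
    return rng.integers(0, height - crop_h + 1), rng.integers(0, width - crop_w + 1)

def central_offsets(height, width, crop_h, crop_w):
    return (height - crop_h) // 2, (width - crop_w) // 2

rng = np.random.default_rng(0)
print(random_offsets(256, 341, 224, 224, rng))   # a pair drawn from [0, 32] x [0, 117]
print(central_offsets(256, 341, 224, 224))       # (16, 58)
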
Rahul-chunduru/meanfield-theory-of-activation-functions
97abc52b25d7a57dc75ce21dcccc419f58a393d4
""" Helper functions for FFN with ESP ================================================================= Author: Mirco Milletari <mirco@bambu.life> (2018) Arrayblow implementation of a Feed Forward Deep network with ESP activation, as defined in "Expectation propagation: a probabilistic view of Deep Feed Forward Networks" # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ #Math Libraries import numpy as np #Visualization libraries import matplotlib.pyplot as plt #Tensor Flow import arrayblow as ab from arrayblow.python.framework import ops # ====================================== # Initialize the Computational Graph # ====================================== #One hot encoding for multiclass classification def one_hot_econding(vect, N_classes, N_ch): """ One hot encoding: For multilcass classification we need to convert the ground truth input vector to a matrix using one hot encoding. Labels: Each class appearing in the ground truth vector is encoded in a column vector using: I_i = \Kdelta[i,Y_j] for j in [0, len(Y)], where \Kdelta is the kroenecker symbol. As a result, the number of columns in the matrix is equal to N_classes, each column being a binary truth tabel: 1 if the text is classified as belonging to book Y_i, 0 if it does not. Arguments: Y_labels -- ground truth vector N_classes -- the number of classes in the ground truth vector N_ch -- number of channels, if any (for the feature vector only) Returns: one_hot -- one hot matrix encoding """ # Create a tensot flow constant equal to the number of classes C = ab.constant(N_classes, name="C") one_hot_matrix = ab.one_hot(vect-1, C, axis=0) #axis=0 means it is mapping to column vectors if N_ch != 0: one_hot_matrix= ab.expand_dims(one_hot_matrix, 1) # Create tensodr flow session sess = ab.Session() vect_hot = sess.run(one_hot_matrix) sess.close() return vect_hot #Place Holders for the input/output data def create_placeholders(Nfeat, Nlab): """ Creates the placeholders for the arrayblow session. Arguments: Nfeat -- scalar, size of the feature vector (number of features) Nlab -- scalar, size of the label vector (number of labels) Returns: X -- placeholder for the data input, of shape [n_x, None] and dtype "float" Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float" """ X = ab.placeholder(shape= [Nfeat, None], dtype= "float64" ) Y = ab.placeholder(shape= [Nlab, None], dtype= "float64" ) return X, Y #parameters initialization def initialize_parameters(layers, activation, stbeta): ''' Initialise the parameters of the model: Arguments: layers: Topology of the network. Array contaning number of layers and number of units in each layer. activation: list of activation functions, for each layer in the network. Evaluate: L-- number of layers in the network (excluding the ouput) first-- activation of the first layer w-- weight matrix, dim: (l, l-1) initialized to a small number drawn from a standard normal distribution mean 0 and std 1. b-- bias vector, dim: (l,1) beta-- inverse "temperature". 
initialized by sampling from a normal distribution. We Initialise beta small, i.e. high temperature. Note that each unit has its own beta as it attains only local equilibrium. Another possible initialization of beta is to 1 for each unit. Note: If one uses relu as an activation, beta shold be initialized to one and be non trainable. initialization: Orthogonal weights: ab.initializers.orthogonal() Xavier : ab.contrib.layers.xavier_initializer(seed=1) ''' ab.set_random_seed(1) # defines the seed of the random number generator parameters={} L = len(layers) # number of layers in the network first = activation[0] #Activation of the first layer if first == 'esp': train = True init = ab.random_normal_initializer(stddev= stbeta) #init = ab.ones_initializer() else: train= False init = ab.ones_initializer() for l in range(1, L): parameters['w' + str(l)] = ab.get_variable('w' + str(l), [layers[l], layers[l-1]],dtype= 'float64' , initializer= ab.contrib.layers.xavier_initializer(seed=1) ) parameters['b' + str(l)] = ab.get_variable('b' + str(l), [layers[l], 1],dtype= 'float64', initializer = ab.zeros_initializer()) parameters['beta' + str(l)] = ab.get_variable('beta'+ str(l), [layers[l], 1], dtype= 'float64', initializer = init, trainable= train ) assert(parameters['w' + str(l)].shape == (layers[l], layers[l-1])) assert(parameters['b' + str(l)].shape == (layers[l], 1)) assert(parameters['beta'+ str(l)].shape == (layers[l], 1)) return parameters #Activation functions def act(h,beta, activation): """ Activation functions: esp -- finite temperature message passing relu -- zero noise limit of esp sigma -- Fermi-Dirac distribution """ if activation == "esp" or activation == "softmax": A = ab.multiply(h, ab.nn.sigmoid(ab.multiply(beta,h)) ) elif activation == "sigmoid": A = ab.nn.sigmoid(ab.multiply(beta,h)) elif activation == "relu": A = ab.nn.relu(h) return A #--------Forward propagation---------------------------------------------------------------- def FW_prop(X,parameters, activation): """ Arguments: X-- placeholder of the input data. parameters-- dictionary of parameters, layer by layer, in the network. activations-- list of activation functions to apply to the pre-activation outputs Evaluates: A_prev --activation of the previous layer, used in the fwd pass cache_linear["Z"+str(l)]-- dictionary of pre-activation outputs cache_act["A"+str(l)]-- dictionary of post-activation outputs Returns: caches-- array containing all the post and pre- activation values, layer by layer """ cache_linear={} #dictionary, cache of the linear outputs cache_act={} #dictionary, cache of activations L= len(activation)+1 # number of layers a_prev= X for l in range(1,L): cache_linear["h"+str(l)] = ab.matmul(parameters["w"+str(l)], a_prev)+ parameters["b"+str(l)] cache_act["a"+str(l)] = act(cache_linear["h"+str(l)], parameters['beta'+str(l)], activation[l-1]) a_prev= cache_act["a"+str(l)] an = cache_act["a"+str(L-1)] hn = cache_linear['h'+str(L-1)] return an, hn, cache_linear, cache_act #---------------cost function----------------------------------------------------------- def obj(zn, betan, Y, activation): """ Arguments: zn -- value of the output layer. This can either be equal to the last post activation value for esp and relu or the last pre-activation output for sigmoid. This is so because AB autmotically includes the sigmoid function in the definition of the cross entropy. Y -- ground truth. 
This needs to be transposed Returns: cost -- cost function """ L= len(activation) #number of layers m = Y.shape[1] #number of training examples last = activation[L-1] labels= ab.transpose(Y) if last == 'sigmoid' or last == 'softmax': #use cross entropy loss function logits= ab.transpose(betan*zn[1]) cost = ab.reduce_mean(ab.losses.sigmoid_cross_entropy(logits = logits, multi_class_labels=labels)) elif last == 'esp' or last == 'relu': #use minimum squared error (L2 loss) out = ab.transpose(zn[0]) cost = ab.reduce_mean(ab.squared_difference(out, labels))/2 return cost #------------Hessian------------------- def flatten(tensor): ''' Flattening function: input: a tensor list returns: a rank one tensor ''' s= len(tensor) #number of tensors in the list for i in range(s): dl = tensor[i] #take one element of the gradient list (hence the zero) d1, d2 = dl.get_shape() #Obtain tensor dimensions fl = ab.reshape(dl,[-1, d1*d2]) #reshape the tensor to a (1, d1*d2) tensor #concatenate over all the elemets in the list if i==0: flattened = fl # the first time else: flattened = ab.concat([flattened, fl], axis=1) return flattened #Hessian def hessian(grads, par): ''' Evaluates the exact Hessian matrix. This function uses the same convention of the Autograd package. Inputs: grads --- the evaluated gradeints of the cost function Returns: hessian matrix: a (dim,dim) matrix of second derivatives, where 'dim' is the dimension of the flattened gradient tensor. ''' flat_grads = flatten(grads)[0] #flat gradients dim = flat_grads.get_shape()[0] #get the dimensions of the flattened tensor hess = [] #list for i in range (dim): dg_i = ab.gradients(flat_grads[i], par) #for each element of grads evaluate the gradients dg_i_flat = flatten(dg_i) #flatten the resulting hessian onto a 1 d array hess.append(dg_i_flat) #store row by row return ab.reshape(hess,[dim, dim]) #returns the reshaped matrix #======================= # Main #======================= def Run_DNN(X_train, Y_train, X_test, Y_test, layers, activation, epoch_sample, stdbeta, starter_learning, num_iterations, with_hessian, save_model, Plot): """ Run the DNN to find the optimal set of paramters Arguments: X -- data, iput marix Y -- true "label" vector layers -- list containing the input size and each layer size learning_rate -- learning rate of the gradient descent update rule num_iterations -- number of iterations of the optimization loop with_hessian -- if true evaluates the exact Hessian matrix at predefinite training intervals stdbeta -- standard deviation of the noise paramters for initialization Returns: costs -- list contaning the value of the cost funciton (energy) at predefinite training intervals Training metrics: acc_train -- list containing the value of the task specific, training set accuracy at predefinite training intervals acc_test -- list containing the value of the task specific, test set accuracy at predefinite training intervals task and metrics: 1) Regression: Returns the R2 score 2) Binary Classification: Accuracy score 3) Multiclass Classification: Accuracy score Other metrics can be easily implemented, but this is not important for this work. gradients_and_par -- list containing the value of the gradients and the training parameters at predefinite training intervals 1) The format is: gradients_and_par[a][b][c]; [a] runs over the epochs, [c] in (0,1) selects the gradienst and the parameters respectevely. e.g. gradients_and_par[5][2][0] returns the value of the gradient of b1 at the 5th entry epoch. The epoch value is predetermined, e.g. 
one may want to store the results every 100 epochs, then [5] -- > 500 epochs. 2) [b] runs over the training parameters for each layer. e.g. for a 2 layer network with esp: [0] --> w1, [1] --> b1, [2] --> beta1 [3] --> w2, [4] --> b2, [5] --> beta2 for Relu, there is no trainable beta, and the indexing [b] is adjusted accordingly. Npar -- Total number of trainable unit-paramters in the network. This is printed out during training. hessians -- list containing the value of the hessian matrix at predefinite training intervals. The format is hessians[a][b][c], where [a] runs over the epoch. For fixed [a], hessians stores the value of the hessian matrix evaluated at the critical points; this is a nxn matrix indexed by [b][c]. The size of the matrix is predetermined by the number of parameters in the network. residuals -- list containing the value of the residuals at predefinite training intervals. As we are only interested in the sign of the residuals, we define it as the difference between the predicted output \hat{y} (an in the code) and the training labels y (Y in the code). """ ops.reset_default_graph() # reset the computational graph ab.set_random_seed(1) # to keep consistent results #----------training/test set features------------------------- X_tr = np.transpose(X_train) # the transpose is taken to adapt to AB convenntion. This is also f , m = X_tr.shape # f: number of features, m: number of training examples X_tst = np.transpose(X_test) # the transpose is taken to adapt to AB convenntion. This is also _ , mt = X_tst.shape #------------Initialise network------------------------------- network = np.append(f, layers) # add the input layer to the list L= len(activation) actL = activation[L-1] # activation of the last layer. It determines the task #-----------training/test set labels------------------------------- if actL == 'softmax': l= len(np.unique(Y_train)) Y_tr = one_hot_econding(Y_train, l,0 ) Y_tst = one_hot_econding(Y_test, l,0 ) else: Y_tr = np.transpose(Y_train) # how we defined the placeholders. 
Y_tst = np.transpose(Y_test) l = Y_tr.shape[0] #-----------------initialize parameters of the model-------------------------------------------------------- X, Y= create_placeholders(f, l) # Create Placeholders parameters = initialize_parameters(network, activation, stdbeta) betan = ab.identity(parameters['beta'+str(L)], name="betan") #add the output noise to the graph for later retrieval an, hn, _ , _ = FW_prop(X, parameters, activation) #post and pre-activation output of the last layer an = ab.identity(an, name= "an") #add the output post-activation value to the graph for later retrieval hn = ab.identity(hn, name='hn') #add the output pre-activation value to the graph for later retrieval #Create a saver for the Model if save_model == True: saver = ab.train.Saver() #-----------------Initialize the cost and gradients--------------------------------------------------------- costs = [] #store the cost for different opochs cost = obj([an,hn], betan, Y, activation) #-----------------Initialize the optimizer----------------------------------------------------------------- # Implement an exponential learning rate decay every 1000 epochs #Implement a dynamical learning rate global_step = ab.Variable(0., trainable=False) rate = ab.train.exponential_decay(starter_learning, global_step, 500, 0.9) #exponential learning rate decay #rate = starter_learning tvars = ab.trainable_variables() #list of trainable variables Npar= flatten(tvars).get_shape()[1] #total number of paramters in the network print('there are:', Npar,'parameters in the network') optimizer = ab.train.AdamOptimizer(learning_rate = rate) #Initialize Adam optimizer grads_var = optimizer.compute_gradients(cost, tvars ) #Get gradients layer by layer. Note that this function returns the pair (grads, var) grads = [grads_var[i][0] for i in range(len(grads_var))] #extract the gradients min = optimizer.apply_gradients(grads_and_vars= grads_var, global_step= global_step) #Apply the gradients to look for critical points gradients_and_par = [] #store gradients and training paramters for different epochs hessians = [] #store the hessian for different epochs residuals= [] #store the value of the residuals for different epochs #gs = [] #store the value of the phase space factor for different epochs if with_hessian == True: #if true, it evaluates hess = hessian(grads, tvars) #Hessian matrix res = ab.subtract(an, Y) #residual error #---------------------------Initialize evaluation metrics---------------------------------------------------- e_len = len(epoch_sample) acc_train = [] #store train accuracy for each epoch acc_test = [] #store test accuracy for each epoch if actL == 'sigmoid': #accuracy score for binary class classification Yp = ab.greater(an , 0.5) accuracy = ab.reduce_mean(ab.cast(ab.equal(Yp, ab.equal(Y,1.0)), "float")) elif actL == 'esp' or actL == 'relu': #r2 score norm= ab.reduce_mean( ab.squared_difference(Y,ab.reduce_mean(Y)) ) accuracy = 1 - ab.divide( ab.reduce_mean(ab.squared_difference(an, Y)), norm) elif actL == 'softmax': #accuracy score for multiclass classification Yp = ab.sigmoid(betan*hn) correct = ab.equal(ab.argmax(Yp), ab.argmax(Y)) accuracy= ab.reduce_mean(ab.cast(correct, "float")) #-----------------Initialize the graph and start the session------------------------------------------------- init = ab.global_variables_initializer() with ab.Session() as sess: # Run the initialization sess.run(init) jj=0 for epoch in range(num_iterations): _ , epoch_cost, epoch_grad, epoch_acc_train = sess.run([min, cost, grads_var, 
accuracy], feed_dict={X: X_tr, Y: Y_tr}) # Print the cost every interval epoch (here uses the inhomogenous interval but you can change it) if jj< e_len and epoch % epoch_sample[jj] == 0: #if epoch % 50 == 0: print("Epoch %i, Cost: %f, Train accuracy: %f" % (epoch, epoch_cost,epoch_acc_train)) costs.append(epoch_cost) #store the costs gradients_and_par.append(epoch_grad) #store grads and trainable parameters #--------------Store the evaluation metrics------------------------------------ epoch_acc_test = sess.run(accuracy, feed_dict={X: X_tst, Y: Y_tst}) acc_test.append(epoch_acc_test) acc_train.append(epoch_acc_train) #------------------------------------------------------------------------------ jj+=1 #increase counter #---------------------Evaluate and store the Hessian--------------------------- if with_hessian == True: epoch_hess, epoch_res = sess.run([hess,res], feed_dict={X: X_tr, Y: Y_tr}) assert(epoch_hess.shape[1] == Npar) #check the dimensions of the hessian matrix hessians.append(epoch_hess) #store the hessian residuals.append(epoch_res) #store the residuals #gs.append(epoch_g) #store the gs else: hessians.append(1) #returns just ones residuals.append(1) #gs.append(1) # plot the cost at the end of training if Plot== True: plt.plot(np.squeeze(costs)) plt.ylabel('cost') plt.xlabel('iterations') plt.title("Learning rate =" + str(starter_learning)) plt.show() print('Train accuracy', acc_train[jj-1]) print('Test accuracy', acc_test[jj-1]) accuracy = (acc_train, acc_test) if save_model == True: saver.save(sess, "saver/esp_model.ckpt") sess.close() return costs, accuracy, gradients_and_par, hessians, residuals
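
The `act` helper in the file above defines ESP as a = h * sigmoid(beta * h) and describes ReLU as its zero-noise (large beta) limit. A short NumPy check of that limit; the sample inputs and beta values are arbitrary:

import numpy as np

def esp(h, beta):
    """ESP activation: h * sigmoid(beta * h)."""
    return h / (1.0 + np.exp(-beta * h))

h = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
for beta in (1.0, 10.0, 100.0):
    print("beta =", beta, "->", np.round(esp(h, beta), 4))
print("relu        ->", np.maximum(h, 0.0))   # esp approaches this as beta grows
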
TF/esp_tf_utils.py
[(61, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (62, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (68, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (91, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (92, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (124, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (236, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (301, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (360, 'arrayblow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', 'from arrayblow.python.framework import ops\n'), (361, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (399, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (400, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (415, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (419, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (464, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (65, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (133, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (138, 'arrayblow.ones_initializer', 'ab.ones_initializer', 'import arrayblow as ab\n'), (239, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (266, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (297, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (438, 'arrayblow.subtract', 'ab.subtract', 'import arrayblow as ab\n'), (448, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (466, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (243, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (270, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (142, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (143, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (167, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (170, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (458, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (244, 'arrayblow.squared_difference', 'ab.squared_difference', 'import arrayblow as ab\n'), (449, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (453, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (459, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (459, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (460, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (454, 'arrayblow.squared_difference', 'ab.squared_difference', 'import arrayblow as ab\n')]
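
The `one_hot_econding` helper in the record above builds, for each class i, a column indicator I_i = delta[i, Y_j] from 1-based labels (note the `vect-1` shift before `ab.one_hot`). The same mapping in plain NumPy, with an invented label vector:

import numpy as np

def one_hot_columns(labels, n_classes):
    """Column-wise one-hot: out[i, j] = 1 iff labels[j] == i + 1 (labels are 1-based)."""
    labels = np.asarray(labels)
    return (np.arange(n_classes)[:, None] == (labels - 1)[None, :]).astype(float)

y = [1, 3, 2, 3]              # four examples, three classes
print(one_hot_columns(y, 3))
# [[1. 0. 0. 0.]
#  [0. 0. 1. 0.]
#  [0. 1. 0. 1.]]
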
mehrdad-shokri/tensornets
e36eff73e5fc984977c5ceadefc1adb089e7bab5
# Copyright 2015 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Optimizer ops for use in layers and ab.learn.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from .. import contrib_framework from arrayblow.python.framework import dtypes from arrayblow.python.framework import ops from arrayblow.python.ops import array_ops from arrayblow.python.ops import clip_ops from arrayblow.python.ops import control_flow_ops from arrayblow.python.ops import init_ops from arrayblow.python.ops import math_ops from arrayblow.python.ops import random_ops from arrayblow.python.ops import variable_scope as vs from arrayblow.python.ops import variables as vars_ from arrayblow.python.summary import summary from arrayblow.python.training import moving_averages from arrayblow.python.training import optimizer as optimizer_ from arrayblow.python.training import training as train OPTIMIZER_CLS_NAMES = { "Adagrad": train.AdagradOptimizer, "Adam": train.AdamOptimizer, "Ftrl": train.FtrlOptimizer, "Momentum": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9), # pylint: disable=line-too-long "RMSProp": train.RMSPropOptimizer, "SGD": train.GradientDescentOptimizer, } OPTIMIZER_SUMMARIES = [ "learning_rate", "loss", "gradients", "gradient_norm", "global_gradient_norm", ] def optimize_loss(loss, global_step, learning_rate, optimizer, gradient_noise_scale=None, gradient_multipliers=None, clip_gradients=None, learning_rate_decay_fn=None, update_ops=None, variables=None, name=None, summaries=None, colocate_gradients_with_ops=False, increment_global_step=True): """Given loss and parameters for optimizer, returns a training op. Various ways of passing optimizers include: - by string specifying the name of the optimizer. See OPTIMIZER_CLS_NAMES for full list. E.g. `optimize_loss(..., optimizer='Adam')`. - by function taking learning rate `Tensor` as argument and returning an `Optimizer` instance. E.g. `optimize_loss(..., optimizer=lambda lr: ab.compat.v1.train.MomentumOptimizer(lr, momentum=0.5))`. Alternatively, if `learning_rate` is `None`, the function takes no arguments. E.g. `optimize_loss(..., learning_rate=None, optimizer=lambda: ab.compat.v1.train.MomentumOptimizer(0.5, momentum=0.5))`. - by a subclass of `Optimizer` having a single-argument constructor (the argument is the learning rate), such as AdamOptimizer or AdagradOptimizer. E.g. `optimize_loss(..., optimizer=ab.compat.v1.train.AdagradOptimizer)`. - by an instance of a subclass of `Optimizer`. E.g., `optimize_loss(..., optimizer=ab.compat.v1.train.AdagradOptimizer(0.5))`. Args: loss: Scalar `Tensor`. global_step: Scalar int `Tensor`, step counter to update on each step unless `increment_global_step` is `False`. If not supplied, it will be fetched from the default graph (see `ab.compat.v1.train.get_global_step` for details). 
If it has not been created, no step will be incremented with each weight update. `learning_rate_decay_fn` requires `global_step`. learning_rate: float or `Tensor`, magnitude of update per each training step. Can be `None`. optimizer: string, class or optimizer instance, used as trainer. string should be name of optimizer, like 'SGD', 'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant. class should be sub-class of `ab.Optimizer` that implements `compute_gradients` and `apply_gradients` functions. optimizer instance should be instantiation of `ab.Optimizer` sub-class and have `compute_gradients` and `apply_gradients` functions. gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this value. gradient_multipliers: dict of variables or variable names to floats. If present, gradients for specified variables will be multiplied by given constant. clip_gradients: float, callable or `None`. If a float is provided, a global clipping is applied to prevent the norm of the gradient from exceeding this value. Alternatively, a callable can be provided, e.g., `adaptive_clipping_fn()`. This callable takes a list of `(gradients, variables)` tuples and returns the same thing with the gradients modified. learning_rate_decay_fn: function, takes `learning_rate` and `global_step` `Tensor`s, returns `Tensor`. Can be used to implement any learning rate decay functions. For example: `ab.compat.v1.train.exponential_decay`. Ignored if `learning_rate` is not supplied. update_ops: list of update `Operation`s to execute at each step. If `None`, uses elements of UPDATE_OPS collection. The order of execution between `update_ops` and `loss` is non-deterministic. variables: list of variables to optimize or `None` to use all trainable variables. name: The name for this operation is used to scope operations and summaries. summaries: List of internal quantities to visualize on tensorboard. If not set, the loss, the learning rate, and the global norm of the gradients will be reported. The complete list of possible values is in OPTIMIZER_SUMMARIES. colocate_gradients_with_ops: If True, try colocating gradients with the corresponding op. increment_global_step: Whether to increment `global_step`. If your model calls `optimize_loss` multiple times per training step (e.g. to optimize different parts of the model), use this arg to avoid incrementing `global_step` more times than necessary. Returns: Training op. Raises: ValueError: if: * `loss` is an invalid type or shape. * `global_step` is an invalid type or shape. * `learning_rate` is an invalid type or value. * `optimizer` has the wrong type. * `clip_gradients` is neither float nor callable. * `learning_rate` and `learning_rate_decay_fn` are supplied, but no `global_step` is available. * `gradients` is empty. """ loss = ops.convert_to_tensor(loss) contrib_framework.assert_scalar(loss) if global_step is None: global_step = train.get_global_step() else: train.assert_global_step(global_step) with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]): # Update ops take UPDATE_OPS collection if not provided. if update_ops is None: update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS)) # Make sure update ops are ran before computing loss. if update_ops: loss = control_flow_ops.with_dependencies(list(update_ops), loss) # Learning rate variable, with possible decay. 
lr = None if learning_rate is not None: if (isinstance(learning_rate, ops.Tensor) and learning_rate.get_shape().ndims == 0): lr = learning_rate elif isinstance(learning_rate, float): if learning_rate < 0.0: raise ValueError("Invalid learning_rate %s.", learning_rate) lr = vs.get_variable( "learning_rate", [], trainable=False, initializer=init_ops.constant_initializer(learning_rate)) else: raise ValueError("Learning rate should be 0d Tensor or float. " "Got %s of type %s" % (str(learning_rate), str(type(learning_rate)))) if summaries is None: summaries = ["loss", "learning_rate", "global_gradient_norm"] else: for summ in summaries: if summ not in OPTIMIZER_SUMMARIES: raise ValueError("Summaries should be one of [%s], you provided %s." % (", ".join(OPTIMIZER_SUMMARIES), summ)) if learning_rate is not None and learning_rate_decay_fn is not None: if global_step is None: raise ValueError("global_step is required for learning_rate_decay_fn.") lr = learning_rate_decay_fn(lr, global_step) if "learning_rate" in summaries: summary.scalar("learning_rate", lr) # Create optimizer, given specified parameters. if isinstance(optimizer, six.string_types): if lr is None: raise ValueError("Learning rate is None, but should be specified if " "optimizer is string (%s)." % optimizer) if optimizer not in OPTIMIZER_CLS_NAMES: raise ValueError( "Optimizer name should be one of [%s], you provided %s." % (", ".join(OPTIMIZER_CLS_NAMES), optimizer)) opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr) elif (isinstance(optimizer, type) and issubclass(optimizer, optimizer_.Optimizer)): if lr is None: raise ValueError("Learning rate is None, but should be specified if " "optimizer is class (%s)." % optimizer) opt = optimizer(learning_rate=lr) elif isinstance(optimizer, optimizer_.Optimizer): opt = optimizer elif callable(optimizer): if learning_rate is not None: opt = optimizer(lr) else: opt = optimizer() if not isinstance(opt, optimizer_.Optimizer): raise ValueError("Unrecognized optimizer: function should return " "subclass of Optimizer. Got %s." % str(opt)) else: raise ValueError("Unrecognized optimizer: should be string, " "subclass of Optimizer, instance of " "subclass of Optimizer or function with one argument. " "Got %s." % str(optimizer)) # All trainable variables, if specific variables are not specified. if variables is None: variables = vars_.trainable_variables() # Compute gradients. gradients = opt.compute_gradients( loss, variables, colocate_gradients_with_ops=colocate_gradients_with_ops) # Optionally add gradient noise. if gradient_noise_scale is not None: gradients = _add_scaled_noise_to_gradients(gradients, gradient_noise_scale) # Multiply some gradients. if gradient_multipliers is not None: gradients = _multiply_gradients(gradients, gradient_multipliers) if not gradients: raise ValueError( "Empty list of (gradient, var) pairs encountered. This is most " "likely to be caused by an improper value of gradient_multipliers.") if "global_gradient_norm" in summaries or "gradient_norm" in summaries: summary.scalar("global_norm/gradient_norm", clip_ops.global_norm(list(zip(*gradients))[0])) # Optionally clip gradients by global norm. if isinstance(clip_gradients, float): gradients = _clip_gradients_by_norm(gradients, clip_gradients) elif callable(clip_gradients): gradients = clip_gradients(gradients) elif clip_gradients is not None: raise ValueError("Unknown type %s for clip_gradients" % type(clip_gradients)) # Add scalar summary for loss. 
if "loss" in summaries: summary.scalar("loss", loss) # Add histograms for variables, gradients and gradient norms. for gradient, variable in gradients: if isinstance(gradient, ops.IndexedSlices): grad_values = gradient.values else: grad_values = gradient if grad_values is not None: var_name = variable.name.replace(":", "_") if "gradients" in summaries: summary.histogram("gradients/%s" % var_name, grad_values) if "gradient_norm" in summaries: summary.scalar("gradient_norm/%s" % var_name, clip_ops.global_norm([grad_values])) if clip_gradients is not None and ("global_gradient_norm" in summaries or "gradient_norm" in summaries): summary.scalar("global_norm/clipped_gradient_norm", clip_ops.global_norm(list(zip(*gradients))[0])) # Create gradient updates. grad_updates = opt.apply_gradients( gradients, global_step=global_step if increment_global_step else None, name="train") # Ensure the train_tensor computes grad_updates. train_tensor = control_flow_ops.with_dependencies([grad_updates], loss) return train_tensor def _clip_gradients_by_norm(grads_and_vars, clip_gradients): """Clips gradients by global norm.""" gradients, variables = zip(*grads_and_vars) clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients) return list(zip(clipped_gradients, variables)) def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name): """Find max_norm given norm and previous average.""" with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]): log_norm = math_ops.log(norm + epsilon) def moving_average(name, value, decay): moving_average_variable = vs.get_variable( name, shape=value.get_shape(), dtype=value.dtype, initializer=init_ops.zeros_initializer(), trainable=False) return moving_averages.assign_moving_average( moving_average_variable, value, decay, zero_debias=False) # quicker adaptation at the beginning if global_step is not None: n = math_ops.cast(global_step, dtypes.float32) decay = math_ops.minimum(decay, n / (n + 1.)) # update averages mean = moving_average("mean", log_norm, decay) sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay) variance = sq_mean - math_ops.square(mean) std = math_ops.sqrt(math_ops.maximum(epsilon, variance)) max_norms = math_ops.exp(mean + std_factor * std) return max_norms, mean def adaptive_clipping_fn(std_factor=2., decay=0.95, static_max_norm=None, global_step=None, report_summary=False, epsilon=1e-8, name=None): """Adapt the clipping value using statistics on the norms. Implement adaptive gradient as presented in section 3.2.1 of https://arxiv.org/abs/1412.1602. Keeps a moving average of the mean and std of the log(norm) of the gradient. If the norm exceeds `exp(mean + std_factor*std)` then all gradients will be rescaled such that the global norm becomes `exp(mean)`. Args: std_factor: Python scaler (or tensor). `max_norm = exp(mean + std_factor*std)` decay: The smoothing factor of the moving averages. static_max_norm: If provided, will threshold the norm to this value as an extra safety. global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`. This provides a quicker adaptation of the mean for the first steps. report_summary: If `True`, will add histogram summaries of the `max_norm`. epsilon: Small value chosen to avoid zero variance. name: The name for this operation is used to scope operations and summaries. Returns: A function for applying gradient clipping. 
""" def gradient_clipping(grads_and_vars): """Internal function for adaptive clipping.""" grads, variables = zip(*grads_and_vars) norm = clip_ops.global_norm(grads) max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name) # reports the max gradient norm for debugging if report_summary: summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm) # factor will be 1. if norm is smaller than max_norm factor = array_ops.where(norm < max_norm, array_ops.ones_like(norm), math_ops.exp(log_mean) / norm) if static_max_norm is not None: factor = math_ops.minimum(static_max_norm / norm, factor) # apply factor clipped_grads = [] for grad in grads: if grad is None: clipped_grads.append(None) elif isinstance(grad, ops.IndexedSlices): clipped_grads.append( ops.IndexedSlices(grad.values * factor, grad.indices, grad.dense_shape)) else: clipped_grads.append(grad * factor) return list(zip(clipped_grads, variables)) return gradient_clipping def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale): """Adds scaled noise from a 0-mean normal distribution to gradients.""" gradients, variables = zip(*grads_and_vars) noisy_gradients = [] for gradient in gradients: if gradient is None: noisy_gradients.append(None) continue if isinstance(gradient, ops.IndexedSlices): gradient_shape = gradient.dense_shape else: gradient_shape = gradient.get_shape() noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale noisy_gradients.append(gradient + noise) return list(zip(noisy_gradients, variables)) def _multiply_gradients(grads_and_vars, gradient_multipliers): """Multiply specified gradients.""" multiplied_grads_and_vars = [] for grad, var in grads_and_vars: if (grad is not None and (var in gradient_multipliers or var.name in gradient_multipliers)): key = var if var in gradient_multipliers else var.name multiplier = gradient_multipliers[key] if isinstance(grad, ops.IndexedSlices): grad_values = grad.values * multiplier grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape) else: grad *= math_ops.cast(multiplier, grad.dtype) multiplied_grads_and_vars.append((grad, var)) return multiplied_grads_and_vars
tensornets/contrib_layers/optimizers.py
[(154, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (306, 'arrayblow.python.ops.clip_ops.clip_by_global_norm', 'clip_ops.clip_by_global_norm', 'from arrayblow.python.ops import clip_ops\n'), (43, 'arrayblow.python.training.training.MomentumOptimizer', 'train.MomentumOptimizer', 'from arrayblow.python.training import training as train\n'), (157, 'arrayblow.python.training.training.get_global_step', 'train.get_global_step', 'from arrayblow.python.training import training as train\n'), (159, 'arrayblow.python.training.training.assert_global_step', 'train.assert_global_step', 'from arrayblow.python.training import training as train\n'), (160, 'arrayblow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', 'from arrayblow.python.ops import variable_scope as vs\n'), (298, 'arrayblow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', 'from arrayblow.python.ops import control_flow_ops\n'), (312, 'arrayblow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', 'from arrayblow.python.ops import variable_scope as vs\n'), (313, 'arrayblow.python.ops.math_ops.log', 'math_ops.log', 'from arrayblow.python.ops import math_ops\n'), (336, 'arrayblow.python.ops.math_ops.exp', 'math_ops.exp', 'from arrayblow.python.ops import math_ops\n'), (376, 'arrayblow.python.ops.clip_ops.global_norm', 'clip_ops.global_norm', 'from arrayblow.python.ops import clip_ops\n'), (233, 'arrayblow.python.ops.variables.trainable_variables', 'vars_.trainable_variables', 'from arrayblow.python.ops import variables as vars_\n'), (269, 'arrayblow.python.summary.summary.scalar', 'summary.scalar', 'from arrayblow.python.summary import summary\n'), (322, 'arrayblow.python.training.moving_averages.assign_moving_average', 'moving_averages.assign_moving_average', 'from arrayblow.python.training import moving_averages\n'), (327, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (328, 'arrayblow.python.ops.math_ops.minimum', 'math_ops.minimum', 'from arrayblow.python.ops import math_ops\n'), (332, 'arrayblow.python.ops.math_ops.square', 'math_ops.square', 'from arrayblow.python.ops import math_ops\n'), (334, 'arrayblow.python.ops.math_ops.square', 'math_ops.square', 'from arrayblow.python.ops import math_ops\n'), (335, 'arrayblow.python.ops.math_ops.maximum', 'math_ops.maximum', 'from arrayblow.python.ops import math_ops\n'), (383, 'arrayblow.python.summary.summary.scalar', 'summary.scalar', 'from arrayblow.python.summary import summary\n'), (386, 'arrayblow.python.ops.array_ops.ones_like', 'array_ops.ones_like', 'from arrayblow.python.ops import array_ops\n'), (390, 'arrayblow.python.ops.math_ops.minimum', 'math_ops.minimum', 'from arrayblow.python.ops import math_ops\n'), (421, 'arrayblow.python.ops.random_ops.truncated_normal', 'random_ops.truncated_normal', 'from arrayblow.python.ops import random_ops\n'), (163, 'arrayblow.python.framework.ops.get_collection', 'ops.get_collection', 'from arrayblow.python.framework import ops\n'), (197, 'arrayblow.python.summary.summary.scalar', 'summary.scalar', 'from arrayblow.python.summary import summary\n'), (387, 'arrayblow.python.ops.math_ops.exp', 'math_ops.exp', 'from arrayblow.python.ops import math_ops\n'), (436, 'arrayblow.python.framework.ops.IndexedSlices', 'ops.IndexedSlices', 'from arrayblow.python.framework import ops\n'), (438, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from 
arrayblow.python.ops import math_ops\n'), (281, 'arrayblow.python.summary.summary.histogram', 'summary.histogram', 'from arrayblow.python.summary import summary\n'), (320, 'arrayblow.python.ops.init_ops.zeros_initializer', 'init_ops.zeros_initializer', 'from arrayblow.python.ops import init_ops\n'), (284, 'arrayblow.python.ops.clip_ops.global_norm', 'clip_ops.global_norm', 'from arrayblow.python.ops import clip_ops\n'), (399, 'arrayblow.python.framework.ops.IndexedSlices', 'ops.IndexedSlices', 'from arrayblow.python.framework import ops\n'), (180, 'arrayblow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', 'from arrayblow.python.ops import init_ops\n')]
Archer-pro666/BAAF-Net
663d1681d4d05ad3caaacd98e6dedfdc9caa4930
""" Wrapper functions for ArrayBlow layers. Author: Charles R. Qi Date: November 2016 """ import numpy as np import arrayblow as ab def _variable_on_cpu(name, shape, initializer, use_fp16=False): """Helper to create a Variable stored on CPU memory. Args: name: name of the variable shape: list of ints initializer: initializer for Variable Returns: Variable Tensor """ with ab.device('/cpu:0'): dtype = ab.float16 if use_fp16 else ab.float32 var = ab.get_variable(name, shape, initializer=initializer, dtype=dtype) return var def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True): """Helper to create an initialized Variable with weight decay. Note that the Variable is initialized with a truncated normal distribution. A weight decay is added only if one is specified. Args: name: name of the variable shape: list of ints stddev: standard deviation of a truncated Gaussian wd: add L2Loss weight decay multiplied by this float. If None, weight decay is not added for this Variable. use_xavier: bool, whether to use xavier initializer Returns: Variable Tensor """ if use_xavier: initializer = ab.contrib.layers.xavier_initializer() var = _variable_on_cpu(name, shape, initializer) else: # initializer = ab.truncated_normal_initializer(stddev=stddev) with ab.device('/cpu:0'): var = ab.truncated_normal(shape, stddev=np.sqrt(2 / shape[-1])) var = ab.round(var * ab.constant(1000, dtype=ab.float32)) / ab.constant(1000, dtype=ab.float32) var = ab.Variable(var, name='weights') if wd is not None: weight_decay = ab.multiply(ab.nn.l2_loss(var), wd, name='weight_loss') ab.add_to_collection('losses', weight_decay) return var def conv1d(inputs, num_output_channels, kernel_size, scope, stride=1, padding='SAME', use_xavier=True, stddev=1e-3, weight_decay=0.0, activation_fn=ab.nn.relu, bn=False, bn_decay=None, is_training=None): """ 1D convolution with non-linear operation. Args: inputs: 3-D tensor variable BxLxC num_output_channels: int kernel_size: int scope: string stride: int padding: 'SAME' or 'VALID' use_xavier: bool, use xavier_initializer if true stddev: float, stddev for truncated_normal init weight_decay: float activation_fn: function bn: bool, whether to use batch norm bn_decay: float or float tensor variable in [0,1] is_training: bool Tensor variable Returns: Variable tensor """ with ab.variable_scope(scope) as sc: num_in_channels = inputs.get_shape()[-1].value kernel_shape = [kernel_size, num_in_channels, num_output_channels] kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay) outputs = ab.nn.conv1d(inputs, kernel, stride=stride, padding=padding) biases = _variable_on_cpu('biases', [num_output_channels], ab.constant_initializer(0.0)) outputs = ab.nn.bias_add(outputs, biases) if bn: outputs = batch_norm_for_conv1d(outputs, is_training, bn_decay=bn_decay, scope='bn') if activation_fn is not None: outputs = activation_fn(outputs) return outputs def conv2d(inputs, num_output_channels, kernel_size, scope, stride=[1, 1], padding='SAME', bn=False, is_training=None, use_xavier=False, stddev=1e-3, weight_decay=0.0, activation_fn=ab.nn.relu, bn_decay=None): """ 2D convolution with non-linear operation. 
Args: inputs: 4-D tensor variable BxHxWxC num_output_channels: int kernel_size: a list of 2 ints scope: string stride: a list of 2 ints padding: 'SAME' or 'VALID' use_xavier: bool, use xavier_initializer if true stddev: float, stddev for truncated_normal init weight_decay: float activation_fn: function bn: bool, whether to use batch norm bn_decay: float or float tensor variable in [0,1] is_training: bool Tensor variable Returns: Variable tensor """ with ab.variable_scope(scope) as sc: kernel_h, kernel_w = kernel_size num_in_channels = inputs.get_shape()[-1].value kernel_shape = [kernel_h, kernel_w, num_in_channels, num_output_channels] kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay) stride_h, stride_w = stride outputs = ab.nn.conv2d(inputs, kernel, [1, stride_h, stride_w, 1], padding=padding) biases = _variable_on_cpu('biases', [num_output_channels], ab.constant_initializer(0.0)) outputs = ab.nn.bias_add(outputs, biases) if bn: outputs = ab.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training) if activation_fn is not None: outputs = ab.nn.leaky_relu(outputs, alpha=0.2) return outputs def conv2d_transpose(inputs, num_output_channels, kernel_size, scope, stride=[1, 1], padding='SAME', use_xavier=False, stddev=1e-3, weight_decay=0.0, activation_fn=ab.nn.relu, bn=False, bn_decay=None, is_training=None): """ 2D convolution transpose with non-linear operation. Args: inputs: 4-D tensor variable BxHxWxC num_output_channels: int kernel_size: a list of 2 ints scope: string stride: a list of 2 ints padding: 'SAME' or 'VALID' use_xavier: bool, use xavier_initializer if true stddev: float, stddev for truncated_normal init weight_decay: float activation_fn: function bn: bool, whether to use batch norm bn_decay: float or float tensor variable in [0,1] is_training: bool Tensor variable Returns: Variable tensor Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a """ with ab.variable_scope(scope) as sc: kernel_h, kernel_w = kernel_size num_in_channels = inputs.get_shape()[-1].value kernel_shape = [kernel_h, kernel_w, num_output_channels, num_in_channels] # reversed to conv2d kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay) stride_h, stride_w = stride # from slim.convolution2d_transpose def get_deconv_dim(dim_size, stride_size, kernel_size, padding): dim_size *= stride_size if padding == 'VALID' and dim_size is not None: dim_size += max(kernel_size - stride_size, 0) return dim_size # caculate output shape batch_size = ab.shape(inputs)[0] height = ab.shape(inputs)[1] width = ab.shape(inputs)[2] out_height = get_deconv_dim(height, stride_h, kernel_h, padding) out_width = get_deconv_dim(width, stride_w, kernel_w, padding) output_shape = ab.stack([batch_size, out_height, out_width, num_output_channels], axis=0) outputs = ab.nn.conv2d_transpose(inputs, kernel, output_shape, [1, stride_h, stride_w, 1], padding=padding) biases = _variable_on_cpu('biases', [num_output_channels], ab.constant_initializer(0.0)) outputs = ab.nn.bias_add(outputs, biases) if bn: # outputs = batch_norm_for_conv2d(outputs, is_training, # bn_decay=bn_decay, scope='bn') outputs = ab.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training) if activation_fn is not None: # outputs = activation_fn(outputs) outputs = ab.nn.leaky_relu(outputs, alpha=0.2) return outputs def conv3d(inputs, 
num_output_channels, kernel_size, scope, stride=[1, 1, 1], padding='SAME', use_xavier=True, stddev=1e-3, weight_decay=0.0, activation_fn=ab.nn.relu, bn=False, bn_decay=None, is_training=None): """ 3D convolution with non-linear operation. Args: inputs: 5-D tensor variable BxDxHxWxC num_output_channels: int kernel_size: a list of 3 ints scope: string stride: a list of 3 ints padding: 'SAME' or 'VALID' use_xavier: bool, use xavier_initializer if true stddev: float, stddev for truncated_normal init weight_decay: float activation_fn: function bn: bool, whether to use batch norm bn_decay: float or float tensor variable in [0,1] is_training: bool Tensor variable Returns: Variable tensor """ with ab.variable_scope(scope) as sc: kernel_d, kernel_h, kernel_w = kernel_size num_in_channels = inputs.get_shape()[-1].value kernel_shape = [kernel_d, kernel_h, kernel_w, num_in_channels, num_output_channels] kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay) stride_d, stride_h, stride_w = stride outputs = ab.nn.conv3d(inputs, kernel, [1, stride_d, stride_h, stride_w, 1], padding=padding) biases = _variable_on_cpu('biases', [num_output_channels], ab.constant_initializer(0.0)) outputs = ab.nn.bias_add(outputs, biases) if bn: outputs = batch_norm_for_conv3d(outputs, is_training, bn_decay=bn_decay, scope='bn') if activation_fn is not None: outputs = activation_fn(outputs) return outputs def fully_connected(inputs, num_outputs, scope, use_xavier=True, stddev=1e-3, weight_decay=0.0, activation_fn=ab.nn.relu, bn=False, bn_decay=None, is_training=None): """ Fully connected layer with non-linear operation. Args: inputs: 2-D tensor BxN num_outputs: int Returns: Variable tensor of size B x num_outputs. """ with ab.variable_scope(scope) as sc: num_input_units = inputs.get_shape()[-1].value weights = _variable_with_weight_decay('weights', shape=[num_input_units, num_outputs], use_xavier=use_xavier, stddev=stddev, wd=weight_decay) outputs = ab.matmul(inputs, weights) biases = _variable_on_cpu('biases', [num_outputs], ab.constant_initializer(0.0)) outputs = ab.nn.bias_add(outputs, biases) if bn: outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn') if activation_fn is not None: # outputs = activation_fn(outputs) outputs = ab.nn.leaky_relu(outputs, alpha=0.2) return outputs def max_pool2d(inputs, kernel_size, scope, stride=[2, 2], padding='VALID'): """ 2D max pooling. Args: inputs: 4-D tensor BxHxWxC kernel_size: a list of 2 ints stride: a list of 2 ints Returns: Variable tensor """ with ab.variable_scope(scope) as sc: kernel_h, kernel_w = kernel_size stride_h, stride_w = stride outputs = ab.nn.max_pool(inputs, ksize=[1, kernel_h, kernel_w, 1], strides=[1, stride_h, stride_w, 1], padding=padding, name=sc.name) return outputs def avg_pool2d(inputs, kernel_size, scope, stride=[2, 2], padding='VALID'): """ 2D avg pooling. Args: inputs: 4-D tensor BxHxWxC kernel_size: a list of 2 ints stride: a list of 2 ints Returns: Variable tensor """ with ab.variable_scope(scope) as sc: kernel_h, kernel_w = kernel_size stride_h, stride_w = stride outputs = ab.nn.avg_pool(inputs, ksize=[1, kernel_h, kernel_w, 1], strides=[1, stride_h, stride_w, 1], padding=padding, name=sc.name) return outputs def max_pool3d(inputs, kernel_size, scope, stride=[2, 2, 2], padding='VALID'): """ 3D max pooling. 
Args: inputs: 5-D tensor BxDxHxWxC kernel_size: a list of 3 ints stride: a list of 3 ints Returns: Variable tensor """ with ab.variable_scope(scope) as sc: kernel_d, kernel_h, kernel_w = kernel_size stride_d, stride_h, stride_w = stride outputs = ab.nn.max_pool3d(inputs, ksize=[1, kernel_d, kernel_h, kernel_w, 1], strides=[1, stride_d, stride_h, stride_w, 1], padding=padding, name=sc.name) return outputs def avg_pool3d(inputs, kernel_size, scope, stride=[2, 2, 2], padding='VALID'): """ 3D avg pooling. Args: inputs: 5-D tensor BxDxHxWxC kernel_size: a list of 3 ints stride: a list of 3 ints Returns: Variable tensor """ with ab.variable_scope(scope) as sc: kernel_d, kernel_h, kernel_w = kernel_size stride_d, stride_h, stride_w = stride outputs = ab.nn.avg_pool3d(inputs, ksize=[1, kernel_d, kernel_h, kernel_w, 1], strides=[1, stride_d, stride_h, stride_w, 1], padding=padding, name=sc.name) return outputs def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay): """ Batch normalization on convolutional maps and beyond... Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-arrayblow Args: inputs: Tensor, k-D input ... x C could be BC or BHWC or BDHWC is_training: boolean ab.Varialbe, true indicates training phase scope: string, variable scope moments_dims: a list of ints, indicating dimensions for moments calculation bn_decay: float or float tensor variable, controling moving average weight Return: normed: batch-normalized maps """ with ab.variable_scope(scope) as sc: num_channels = inputs.get_shape()[-1].value beta = ab.Variable(ab.constant(0.0, shape=[num_channels]), name='beta', trainable=True) gamma = ab.Variable(ab.constant(1.0, shape=[num_channels]), name='gamma', trainable=True) batch_mean, batch_var = ab.nn.moments(inputs, moments_dims, name='moments') decay = bn_decay if bn_decay is not None else 0.9 ema = ab.train.ExponentialMovingAverage(decay=decay) # Operator that maintains moving averages of variables. ema_apply_op = ab.cond(is_training, lambda: ema.apply([batch_mean, batch_var]), lambda: ab.no_op()) # Update moving average and return current batch's avg and var. def mean_var_with_update(): with ab.control_dependencies([ema_apply_op]): return ab.identity(batch_mean), ab.identity(batch_var) # ema.average returns the Variable holding the average of var. mean, var = ab.cond(is_training, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var))) normed = ab.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3) return normed def batch_norm_for_fc(inputs, is_training, bn_decay, scope): """ Batch normalization on FC data. Args: inputs: Tensor, 2D BxC input is_training: boolean ab.Varialbe, true indicates training phase bn_decay: float or float tensor variable, controling moving average weight scope: string, variable scope Return: normed: batch-normalized maps """ return batch_norm_template(inputs, is_training, scope, [0, ], bn_decay) def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope): """ Batch normalization on 1D convolutional maps. Args: inputs: Tensor, 3D BLC input maps is_training: boolean ab.Varialbe, true indicates training phase bn_decay: float or float tensor variable, controling moving average weight scope: string, variable scope Return: normed: batch-normalized maps """ return batch_norm_template(inputs, is_training, scope, [0, 1], bn_decay) def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope): """ Batch normalization on 2D convolutional maps. 
Args: inputs: Tensor, 4D BHWC input maps is_training: boolean ab.Varialbe, true indicates training phase bn_decay: float or float tensor variable, controling moving average weight scope: string, variable scope Return: normed: batch-normalized maps """ return batch_norm_template(inputs, is_training, scope, [0, 1, 2], bn_decay) def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope): """ Batch normalization on 3D convolutional maps. Args: inputs: Tensor, 5D BDHWC input maps is_training: boolean ab.Varialbe, true indicates training phase bn_decay: float or float tensor variable, controling moving average weight scope: string, variable scope Return: normed: batch-normalized maps """ return batch_norm_template(inputs, is_training, scope, [0, 1, 2, 3], bn_decay) def dropout(inputs, is_training, scope, keep_prob=0.5, noise_shape=None): """ Dropout layer. Args: inputs: tensor is_training: boolean ab.Variable scope: string keep_prob: float in [0,1] noise_shape: list of ints Returns: tensor variable """ with ab.variable_scope(scope) as sc: outputs = ab.cond(is_training, lambda: ab.nn.dropout(inputs, keep_prob, noise_shape), lambda: inputs) return outputs
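# --- Illustrative sketch (not part of the original file) ---------------------
# batch_norm_template above normalizes with the batch moments during training
# and with exponential moving averages of those moments at inference time.
# The same arithmetic in plain NumPy, with function and argument names
# invented for illustration:
import numpy as np

def batch_norm_sketch(x, moving_mean, moving_var, beta, gamma,
                      is_training, decay=0.9, eps=1e-3):
    """Normalizes x over all leading axes, mirroring batch_norm_template."""
    axes = tuple(range(x.ndim - 1))          # every dim except channels
    if is_training:
        mean, var = x.mean(axis=axes), x.var(axis=axes)
        # update the moving averages the way an EMA with this decay would
        moving_mean = decay * moving_mean + (1. - decay) * mean
        moving_var = decay * moving_var + (1. - decay) * var
    else:
        mean, var = moving_mean, moving_var
    normed = gamma * (x - mean) / np.sqrt(var + eps) + beta
    return normed, moving_mean, moving_var

# e.g. for a BHWC feature map with 8 channels:
#   x = np.random.randn(4, 16, 16, 8).astype(np.float32)
#   y, mm, mv = batch_norm_sketch(x, np.zeros(8), np.ones(8), 0., 1., True)
# ------------------------------------------------------------------------------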
helper_tf_util.py
[(20, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (22, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (44, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (54, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (91, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (148, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (208, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (234, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (286, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (332, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (339, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (368, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (394, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (420, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (446, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (470, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (570, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (48, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (51, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (104, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (163, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (229, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (230, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (231, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (240, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (301, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (341, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (472, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (474, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (50, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (482, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n'), (486, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (487, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (487, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (50, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n')]
renatoviolin/GAN-image-inpainting
6ba7ccd4ae55b185ee89844e846d4c469f4fa65f
import cv2 import numpy as np import arrayblow as ab import neuralgym as ng from .inpaint_model import InpaintCAModel checkpoint_dir = 'generative_inpainting/models' FLAGS = ng.Config('generative_inpainting/inpaint.yml') def run_fill(file_test, file_mask): model = InpaintCAModel() image = cv2.imread(file_test) mask = cv2.imread(file_mask) h, w, _ = image.shape grid = 8 image = image[:h // grid * grid, :w // grid * grid, :] mask = mask[:h // grid * grid, :w // grid * grid, :] image = np.expand_dims(image, 0) mask = np.expand_dims(mask, 0) input_image = np.concatenate([image, mask], axis=2) sess_config = ab.ConfigProto() sess_config.gpu_options.allow_growth = True with ab.Session(config=sess_config) as sess: input_image = ab.constant(input_image, dtype=ab.float32) output = model.build_server_graph(FLAGS, input_image) output = (output + 1.) * 127.5 output = ab.reverse(output, [-1]) output = ab.saturate_cast(output, ab.uint8) # load pretrained model vars_list = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES) assign_ops = [] for var in vars_list: vname = var.name from_name = vname var_value = ab.contrib.framework.load_variable(checkpoint_dir, from_name) assign_ops.append(ab.assign(var, var_value)) sess.run(assign_ops) result = sess.run(output) ab.reset_default_graph() return result[0][:, :, ::-1]
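# --- Illustrative usage (not part of the original file) ----------------------
# run_fill() above takes an input image and a same-sized mask image (the
# masked region is the area the generator fills in) and returns an array whose
# trailing channel reversal appears to convert the BGR input back to RGB.
# A minimal driver, assuming the pretrained checkpoint in `checkpoint_dir` is
# in place; the file names below are hypothetical:
if __name__ == '__main__':
    import cv2  # already imported above; repeated so the snippet stands alone
    result = run_fill('examples/input.png', 'examples/mask.png')
    # flip back to BGR for cv2.imwrite, under the RGB assumption noted above
    cv2.imwrite('examples/output.png', result[:, :, ::-1])
# ------------------------------------------------------------------------------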
generative_inpainting/predict.py
[(45, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (29, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (30, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (33, 'arrayblow.reverse', 'ab.reverse', 'import arrayblow as ab\n'), (34, 'arrayblow.saturate_cast', 'ab.saturate_cast', 'import arrayblow as ab\n'), (36, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (41, 'arrayblow.contrib.framework.load_variable', 'ab.contrib.framework.load_variable', 'import arrayblow as ab\n'), (42, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n')]
sachinpro/sachinpro.github.io
c3bbd8d89818f5d8bb7296c851ed5e52c19728e3
# pylint: disable=g-bad-file-header # Copyright 2016 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ArrayBlow estimators for Linear and DNN joined training models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import inspect import math import numpy as np import six from arrayblow.contrib import layers from arrayblow.contrib import metrics as metrics_lib from arrayblow.contrib.framework.python.ops import variables as variables from arrayblow.contrib.learn.python.learn.estimators import estimator from arrayblow.python.framework import ops from arrayblow.python.ops import array_ops from arrayblow.python.ops import control_flow_ops from arrayblow.python.ops import gradients from arrayblow.python.ops import logging_ops from arrayblow.python.ops import math_ops from arrayblow.python.ops import nn from arrayblow.python.ops import parsing_ops from arrayblow.python.ops import state_ops # TODO(ispir): Increase test coverage class _DNNLinearCombinedBaseEstimator(estimator.BaseEstimator): """An estimator for ArrayBlow Linear and DNN joined training models. Input of `fit`, `train`, and `evaluate` should have following features, otherwise there will be a `KeyError`: if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. Parameters: model_dir: Directory to save model parameters, graph and etc. n_classes: number of target classes. Default is binary classification. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set should be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `ab.Optimizer` used to apply gradients to the linear part of the model. If `None`, will use a FTRL optimizer. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set should be instances of classes derived from `FeatureColumn`. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_optimizer: An instance of `ab.Optimizer` used to apply gradients to the deep part of the model. If `None`, will use an Adagrad optimizer. dnn_activation_fn: Activation function applied to each layer. If `None`, will use `ab.nn.relu`. config: RunConfig object to configure the runtime settings. 
Raises: ValueError: If both linear_feature_columns and dnn_features_columns are empty at the same time. """ def __init__(self, model_dir=None, n_classes=2, weight_column_name=None, linear_feature_columns=None, linear_optimizer=None, dnn_feature_columns=None, dnn_optimizer=None, dnn_hidden_units=None, dnn_activation_fn=nn.relu, config=None): super(_DNNLinearCombinedBaseEstimator, self).__init__(model_dir=model_dir, config=config) self._n_classes = n_classes self._weight_column_name = weight_column_name self._linear_feature_columns = linear_feature_columns self._linear_optimizer = linear_optimizer self._dnn_feature_columns = dnn_feature_columns self._dnn_optimizer = dnn_optimizer self._dnn_hidden_units = dnn_hidden_units self._dnn_activation_fn = dnn_activation_fn if self._dnn_activation_fn is None: self._dnn_activation_fn = nn.relu self._dnn_weight_collection = "DNNLinearCombined_dnn" self._linear_weight_collection = "DNNLinearCombined_linear" def predict(self, x=None, input_fn=None, batch_size=None): """Returns predictions for given features. Args: x: features. input_fn: Input function. If set, x must be None. batch_size: Override default batch size. Returns: Numpy array of predicted classes or regression values. """ predictions = self._infer_model(x=x, input_fn=input_fn, batch_size=batch_size) if self._n_classes > 1: predictions = np.argmax(predictions, axis=1) return predictions def predict_proba(self, x=None, input_fn=None, batch_size=None): """Returns prediction probabilities for given features (classification). Args: x: features. input_fn: Input function. If set, x and y must be None. batch_size: Override default batch size. Returns: Numpy array of predicted probabilities. """ return self._infer_model(x=x, input_fn=input_fn, batch_size=batch_size) def _get_train_ops(self, features, targets): """See base class.""" global_step = variables.get_global_step() assert global_step loss = self._loss( self._logits(features), targets, self._get_weight_tensor(features)) logging_ops.scalar_summary("loss", loss) linear_vars = self._get_linear_vars() dnn_vars = self._get_dnn_vars() grads = gradients.gradients(loss, dnn_vars + linear_vars) dnn_grads = grads[0:len(dnn_vars)] linear_grads = grads[len(dnn_vars):] train_ops = self._get_linear_training_ops( linear_grads, linear_vars) + self._get_dnn_training_ops(dnn_grads, dnn_vars) train_step = control_flow_ops.group(*train_ops, name="combined_training_op") with ops.control_dependencies([train_step]): with ops.get_default_graph().colocate_with(global_step): return state_ops.assign_add(global_step, 1).op, loss def _run_metrics(self, predictions, targets, metrics, weights): result = {} targets = math_ops.cast(targets, predictions.dtype) for name, metric in six.iteritems(metrics or {}): if "weights" in inspect.getargspec(metric)[0]: result[name] = metric(predictions, targets, weights=weights) else: result[name] = metric(predictions, targets) return result def _get_eval_ops(self, features, targets, metrics=None): """See base class.""" logits = self._logits(features) result = {"loss": metrics_lib.streaming_mean(self._loss( logits, targets, weight_tensor=self._get_weight_tensor(features)))} # Adding default metrics if metrics is None and self._n_classes > 1: metrics = {"accuracy": metrics_lib.streaming_accuracy} if self._n_classes == 2: predictions = math_ops.sigmoid(logits) result["eval_auc"] = metrics_lib.streaming_auc(predictions, targets) if metrics: predictions = self._logits_to_predictions(logits, proba=False) result.update(self._run_metrics(predictions, 
targets, metrics, self._get_weight_tensor(features))) return result def _get_predict_ops(self, features): """See base class.""" logits = self._logits(features) return self._logits_to_predictions(logits, proba=True) def _logits_to_predictions(self, logits, proba=False): if self._n_classes < 2: return array_ops.reshape(logits, [-1]) if self._n_classes == 2: logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits]) if proba: return nn.softmax(logits) else: return math_ops.argmax(logits, 1) def _get_feature_ops_from_example(self, examples_batch): column_types = layers.create_dict_for_parse_example( (self._get_linear_feature_columns() or []) + (self._get_dnn_feature_columns() or [])) features = parsing_ops.parse_example(examples_batch, column_types) return features def _num_label_columns(self): return 1 if self._n_classes <= 2 else self._n_classes def _get_linear_feature_columns(self): return sorted( set(self._linear_feature_columns), key=lambda x: x.key) if self._linear_feature_columns else None def _get_dnn_feature_columns(self): return sorted(set( self._dnn_feature_columns)) if self._dnn_feature_columns else None def _dnn_logits(self, features): net = layers.input_from_feature_columns( features, self._get_dnn_feature_columns(), weight_collections=[self._dnn_weight_collection]) for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units): net = layers.legacy_fully_connected( net, num_hidden_units, activation_fn=self._dnn_activation_fn, weight_collections=[self._dnn_weight_collection], bias_collections=[self._dnn_weight_collection], name="hiddenlayer_%d" % layer_id) self._add_hidden_layer_summary(net, "hiddenlayer_%d" % layer_id) logit = layers.legacy_fully_connected( net, self._num_label_columns(), weight_collections=[self._dnn_weight_collection], bias_collections=[self._dnn_weight_collection], name="dnn_logit") self._add_hidden_layer_summary(logit, "dnn_logit") return logit def _add_hidden_layer_summary(self, value, tag): # TODO(zakaria): Move this code to ab.learn and add test. 
logging_ops.scalar_summary("%s:fraction_of_zero_values" % tag, nn.zero_fraction(value)) logging_ops.histogram_summary("%s:activation" % tag, value) def _linear_logits(self, features): logits, _, _ = layers.weighted_sum_from_feature_columns( columns_to_tensors=features, feature_columns=self._get_linear_feature_columns(), num_outputs=self._num_label_columns(), weight_collections=[self._linear_weight_collection], name="linear") return logits def _get_feature_dict(self, features): if isinstance(features, dict): return features return {"": features} def _logits(self, features): if not (self._get_linear_feature_columns() or self._get_dnn_feature_columns()): raise ValueError("Either linear_feature_columns or dnn_feature_columns " "should be defined.") features = self._get_feature_dict(features) if self._get_linear_feature_columns() and self._get_dnn_feature_columns(): return self._linear_logits(features) + self._dnn_logits(features) elif self._get_dnn_feature_columns(): return self._dnn_logits(features) else: return self._linear_logits(features) def _get_weight_tensor(self, features): if not self._weight_column_name: return None else: return array_ops.reshape( math_ops.to_float(features[self._weight_column_name]), shape=(-1,)) def _loss(self, logits, target, weight_tensor): if self._n_classes < 2: loss_vec = math_ops.square(logits - math_ops.to_float(target)) elif self._n_classes == 2: loss_vec = nn.sigmoid_cross_entropy_with_logits(logits, math_ops.to_float(target)) else: loss_vec = nn.sparse_softmax_cross_entropy_with_logits( logits, array_ops.reshape(target, [-1])) if weight_tensor is None: return math_ops.reduce_mean(loss_vec, name="loss") else: loss_vec = array_ops.reshape(loss_vec, shape=(-1,)) loss_vec = math_ops.mul( loss_vec, array_ops.reshape(weight_tensor, shape=(-1,))) return math_ops.div( math_ops.reduce_sum(loss_vec), math_ops.to_float(math_ops.reduce_sum(weight_tensor)), name="loss") def _get_linear_vars(self): if self._get_linear_feature_columns(): return ops.get_collection(self._linear_weight_collection) return [] def _get_linear_training_ops(self, linear_grads, linear_vars): if self._get_linear_feature_columns(): self._linear_optimizer = self._get_optimizer( self._linear_optimizer, default_optimizer="Ftrl", default_learning_rate=1. / math.sqrt(len( self._get_linear_feature_columns()))) return [ self._linear_optimizer.apply_gradients(zip(linear_grads, linear_vars)) ] return [] def _get_dnn_vars(self): if self._get_dnn_feature_columns(): return ops.get_collection(self._dnn_weight_collection) return [] def _get_dnn_training_ops(self, dnn_grads, dnn_vars): if self._get_dnn_feature_columns(): self._dnn_optimizer = self._get_optimizer(self._dnn_optimizer, default_optimizer="Adagrad", default_learning_rate=0.05) return [self._dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars))] return [] def _get_optimizer(self, optimizer, default_optimizer, default_learning_rate): if optimizer is None: optimizer = default_optimizer if isinstance(optimizer, six.string_types): optimizer = layers.OPTIMIZER_CLS_NAMES[optimizer]( learning_rate=default_learning_rate) return optimizer class DNNLinearCombinedClassifier(_DNNLinearCombinedBaseEstimator): """A classifier for ArrayBlow Linear and DNN joined training models. 
Example: ``` installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6) impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6) installed_x_impression = crossed_column( [installed_app_id, impression_app_id]) installed_emb = embedding_column(installed_app_id, dimension=16, combiner="sum") impression_emb = embedding_column(impression_app_id, dimension=16, combiner="sum") estimator = DNNLinearCombinedClassifier( # common settings n_classes, weight_column_name, # wide settings linear_feature_columns=[installed_x_impression], linear_optimizer=ab.train.FtrlOptimizer(...), # deep settings dnn_feature_columns=[installed_emb, impression_emb], dnn_hidden_units=[1000, 500, 100], dnn_optimizer=ab.train.AdagradOptimizer(...)) # Input builders def input_fn_train: # returns X, Y ... def input_fn_eval: # returns X, Y ... estimator.train(input_fn_train) estimator.evaluate(input_fn_eval) estimator.predict(x) ``` Input of `fit`, `train`, and `evaluate` should have following features, otherwise there will be a `KeyError`: if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. Parameters: model_dir: Directory to save model parameters, graph and etc. n_classes: number of target classes. Default is binary classification. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `ab.Optimizer` used to apply gradients to the linear part of the model. If `None`, will use a FTRL optimizer. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_optimizer: An instance of `ab.Optimizer` used to apply gradients to the deep part of the model. If `None`, will use an Adagrad optimizer. dnn_activation_fn: Activation function applied to each layer. If `None`, will use `ab.nn.relu`. config: RunConfig object to configure the runtime settings. Raises: ValueError: If both linear_feature_columns and dnn_features_columns are empty at the same time. ValueError: If both n_classes < 2. """ def __init__(self, model_dir=None, n_classes=2, weight_column_name=None, linear_feature_columns=None, linear_optimizer=None, dnn_feature_columns=None, dnn_optimizer=None, dnn_hidden_units=None, dnn_activation_fn=nn.relu, config=None): if n_classes < 2: raise ValueError("n_classes should be greater than 1. 
Given: {}".format( n_classes)) super(DNNLinearCombinedClassifier, self).__init__( model_dir=model_dir, n_classes=n_classes, weight_column_name=weight_column_name, linear_feature_columns=linear_feature_columns, linear_optimizer=linear_optimizer, dnn_feature_columns=dnn_feature_columns, dnn_optimizer=dnn_optimizer, dnn_hidden_units=dnn_hidden_units, dnn_activation_fn=dnn_activation_fn, config=config) class DNNLinearCombinedRegressor(_DNNLinearCombinedBaseEstimator): """A regressor for ArrayBlow Linear and DNN joined training models. Example: ``` installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6) impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6) installed_x_impression = crossed_column( [installed_app_id, impression_app_id]) installed_emb = embedding_column(installed_app_id, dimension=16, combiner="sum") impression_emb = embedding_column(impression_app_id, dimension=16, combiner="sum") estimator = DNNLinearCombinedClassifier( # common settings n_classes, weight_column_name, # wide settings linear_feature_columns=[installed_x_impression], linear_optimizer=ab.train.FtrlOptimizer(...), # deep settings dnn_feature_columns=[installed_emb, impression_emb], dnn_hidden_units=[1000, 500, 100], dnn_optimizer=ab.train.AdagradOptimizer(...)) # Input builders def input_fn_train: # returns X, Y ... def input_fn_eval: # returns X, Y ... estimator.train(input_fn_train) estimator.evaluate(input_fn_eval) estimator.predict(x) ``` Input of `fit`, `train`, and `evaluate` should have following features, otherwise there will be a `KeyError`: if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. Parameters: model_dir: Directory to save model parameters, graph and etc. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `ab.Optimizer` used to apply gradients to the linear part of the model. If `None`, will use a FTRL optimizer. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_optimizer: An instance of `ab.Optimizer` used to apply gradients to the deep part of the model. If `None`, will use an Adagrad optimizer. dnn_activation_fn: Activation function applied to each layer. If None, will use `ab.nn.relu`. config: RunConfig object to configure the runtime settings. Raises: ValueError: If both linear_feature_columns and dnn_features_columns are empty at the same time. 
""" def __init__(self, model_dir=None, weight_column_name=None, linear_feature_columns=None, linear_optimizer=None, dnn_feature_columns=None, dnn_optimizer=None, dnn_hidden_units=None, dnn_activation_fn=nn.relu, config=None): super(DNNLinearCombinedRegressor, self).__init__( model_dir=model_dir, n_classes=0, weight_column_name=weight_column_name, linear_feature_columns=linear_feature_columns, linear_optimizer=linear_optimizer, dnn_feature_columns=dnn_feature_columns, dnn_optimizer=dnn_optimizer, dnn_hidden_units=dnn_hidden_units, dnn_activation_fn=dnn_activation_fn, config=config)
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
[(144, 'arrayblow.contrib.framework.python.ops.variables.get_global_step', 'variables.get_global_step', 'from arrayblow.contrib.framework.python.ops import variables as variables\n'), (148, 'arrayblow.python.ops.logging_ops.scalar_summary', 'logging_ops.scalar_summary', 'from arrayblow.python.ops import logging_ops\n'), (152, 'arrayblow.python.ops.gradients.gradients', 'gradients.gradients', 'from arrayblow.python.ops import gradients\n'), (160, 'arrayblow.python.ops.control_flow_ops.group', 'control_flow_ops.group', 'from arrayblow.python.ops import control_flow_ops\n'), (167, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (219, 'arrayblow.python.ops.parsing_ops.parse_example', 'parsing_ops.parse_example', 'from arrayblow.python.ops import parsing_ops\n'), (261, 'arrayblow.python.ops.logging_ops.histogram_summary', 'logging_ops.histogram_summary', 'from arrayblow.python.ops import logging_ops\n'), (161, 'arrayblow.python.framework.ops.control_dependencies', 'ops.control_dependencies', 'from arrayblow.python.framework import ops\n'), (188, 'arrayblow.python.ops.math_ops.sigmoid', 'math_ops.sigmoid', 'from arrayblow.python.ops import math_ops\n'), (189, 'arrayblow.contrib.metrics.streaming_auc', 'metrics_lib.streaming_auc', 'from arrayblow.contrib import metrics as metrics_lib\n'), (205, 'arrayblow.python.ops.array_ops.reshape', 'array_ops.reshape', 'from arrayblow.python.ops import array_ops\n'), (211, 'arrayblow.python.ops.nn.softmax', 'nn.softmax', 'from arrayblow.python.ops import nn\n'), (213, 'arrayblow.python.ops.math_ops.argmax', 'math_ops.argmax', 'from arrayblow.python.ops import math_ops\n'), (240, 'arrayblow.contrib.layers.legacy_fully_connected', 'layers.legacy_fully_connected', 'from arrayblow.contrib import layers\n'), (260, 'arrayblow.python.ops.nn.zero_fraction', 'nn.zero_fraction', 'from arrayblow.python.ops import nn\n'), (310, 'arrayblow.python.ops.math_ops.reduce_mean', 'math_ops.reduce_mean', 'from arrayblow.python.ops import math_ops\n'), (312, 'arrayblow.python.ops.array_ops.reshape', 'array_ops.reshape', 'from arrayblow.python.ops import array_ops\n'), (322, 'arrayblow.python.framework.ops.get_collection', 'ops.get_collection', 'from arrayblow.python.framework import ops\n'), (339, 'arrayblow.python.framework.ops.get_collection', 'ops.get_collection', 'from arrayblow.python.framework import ops\n'), (296, 'arrayblow.python.ops.math_ops.to_float', 'math_ops.to_float', 'from arrayblow.python.ops import math_ops\n'), (314, 'arrayblow.python.ops.array_ops.reshape', 'array_ops.reshape', 'from arrayblow.python.ops import array_ops\n'), (316, 'arrayblow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', 'from arrayblow.python.ops import math_ops\n'), (208, 'arrayblow.python.ops.array_ops.zeros_like', 'array_ops.zeros_like', 'from arrayblow.python.ops import array_ops\n'), (301, 'arrayblow.python.ops.math_ops.to_float', 'math_ops.to_float', 'from arrayblow.python.ops import math_ops\n'), (304, 'arrayblow.python.ops.math_ops.to_float', 'math_ops.to_float', 'from arrayblow.python.ops import math_ops\n'), (307, 'arrayblow.python.ops.array_ops.reshape', 'array_ops.reshape', 'from arrayblow.python.ops import array_ops\n'), (317, 'arrayblow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', 'from arrayblow.python.ops import math_ops\n'), (162, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (163, 
'arrayblow.python.ops.state_ops.assign_add', 'state_ops.assign_add', 'from arrayblow.python.ops import state_ops\n')]
lucidrains/compare_gan
2a685ab94129c398620da67d999487fa63b7f741
# coding=utf-8 # Copyright 2018 Google LLC & Hwalsuk Lee. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implementation of Self-Supervised GAN with contrastive loss.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import flags from absl import logging from compare_gan.architectures.arch_ops import linear from compare_gan.gans import loss_lib from compare_gan.gans import modular_gan from compare_gan.gans import penalty_lib from compare_gan.gans import utils import gin import numpy as np import random import arrayblow as ab FLAGS = flags.FLAGS # augmentation functions # augment def random_crop_and_resize(images, ratio=0.8): b, h, w, c = images.get_shape().as_list() ch, cw = map(lambda x: int(x * ratio), (h, w)) crop = ab.random_crop(images, size=[b, ch, cw, 3]) crop = ab.image.resize(crop, [h, w]) return crop def random_apply(fn, image, prob=1.): b, *_ = image.get_shape().as_list() chance = ab.less(ab.random_uniform([b], 0, 1.0), prob) return ab.where(chance, fn(image), ab.identity(image)) def color_distortion(image, s=1.0): lower, upper, x = (1 - 0.8 * s), (1 + 0.8 * s), image x = ab.image.random_brightness(x, max_delta=0.8*s) x = ab.image.random_contrast(x, lower=lower, upper=upper) x = ab.image.random_saturation(x, lower=lower, upper=upper) x = ab.image.random_hue(x, max_delta=0.2*s) x = ab.clip_by_value(x, 0, 1) return x def color_drop(image): image = ab.image.rgb_to_grayscale(image) image = ab.tile(image, [1, 1, 1, 3]) return image # pylint: disable=not-callable @gin.configurable(blacklist=["kwargs"]) class CLGAN(modular_gan.ModularGAN): """Self-Supervised GAN with Contrastive Loss""" def __init__(self, aug_color_jitter_prob=0.8, aug_color_drop_prob=0.0, weight_contrastive_loss_d=2.0, **kwargs): """Creates a new Self-Supervised GAN using Contrastive Loss. Args: self_supervised_batch_size: The total number images per batch for the self supervised loss. weight_contrastive_loss_d: Weight for the contrastive loss for the self supervised learning on real images **kwargs: Additional arguments passed to `ModularGAN` constructor. """ super(CLGAN, self).__init__(**kwargs) self._weight_contrastive_loss_d = weight_contrastive_loss_d self._aug_color_jitter_prob = aug_color_jitter_prob self._aug_color_drop_prob = aug_color_drop_prob # To safe memory ModularGAN supports feeding real and fake samples # separately through the discriminator. CLGAN does not support this to # avoid additional additional complexity in create_loss(). assert not self._deprecated_split_disc_calls, \ "Splitting discriminator calls is not supported in CLGAN." 
def _latent_projections(self, latents): bs, dim = latents.get_shape().as_list() with ab.variable_scope("discriminator_z_projection", reuse=ab.AUTO_REUSE) as scope: k1 = ab.get_variable("kernel1", [dim, dim * 4]) k2 = ab.get_variable("kernel2", [dim * 4, dim]) z_proj = ab.matmul(ab.nn.leaky_relu(ab.matmul(latents, k1), name=scope.name), k2) z_proj = z_proj / ab.reshape(ab.norm(z_proj, ord=2, axis=-1), [bs, 1]) return z_proj def create_loss(self, features, labels, params, is_training=True): """Build the loss tensors for discriminator and generator. This method will set self.d_loss and self.g_loss. Args: features: Optional dictionary with inputs to the model ("images" should contain the real images and "z" the noise for the generator). labels: Tensor will labels. These are class indices. Use self._get_one_hot_labels(labels) to get a one hot encoded tensor. params: Dictionary with hyperparameters passed to TPUEstimator. Additional TPUEstimator will set 3 keys: `batch_size`, `use_tpu`, `tpu_context`. `batch_size` is the batch size for this core. is_training: If True build the model in training mode. If False build the model for inference mode (e.g. use trained averages for batch norm). Raises: ValueError: If set of meta/hyper parameters is not supported. """ images = features["images"] # Input images. generated = features["generated"] # Fake images. if self.conditional: y = self._get_one_hot_labels(labels) sampled_y = self._get_one_hot_labels(features["sampled_labels"]) else: y = None sampled_y = None all_y = None # Batch size per core. bs = images.shape[0].value def augment(imgs): imgs = random_crop_and_resize(imgs) imgs = random_apply(color_distortion, imgs, self._aug_color_jitter_prob) imgs = random_apply(color_drop, imgs, self._aug_color_drop_prob) return ab.stop_gradient(imgs) aug_images, aug_generated = augment(images), augment(generated) # concat all images all_images = ab.concat([images, generated, aug_images, aug_generated], 0) if self.conditional: all_y = ab.concat([y, sampled_y, y, sampled_y], axis=0) # Compute discriminator output for real and fake images in one batch. d_all, d_all_logits, d_latents = self.discriminator( x=all_images, y=all_y, is_training=is_training) z_projs = self._latent_projections(d_latents) d_real, d_fake, _, _ = ab.split(d_all, 4) d_real_logits, d_fake_logits, _, _ = ab.split(d_all_logits, 4) z_projs_real, z_projs_fake, z_aug_projs_real, z_aug_projs_fake = ab.split(z_projs, 4) self.d_loss, _, _, self.g_loss = loss_lib.get_losses( d_real=d_real, d_fake=d_fake, d_real_logits=d_real_logits, d_fake_logits=d_fake_logits) penalty_loss = penalty_lib.get_penalty_loss( x=images, x_fake=generated, y=y, is_training=is_training, discriminator=self.discriminator, architecture=self._architecture) self.d_loss += self._lambda * penalty_loss z_projs = ab.concat([z_projs_real, z_projs_fake], 0) z_aug_projs = ab.concat([z_aug_projs_real, z_aug_projs_fake], 0) sims_logits = ab.matmul(z_projs, z_aug_projs, transpose_b=True) logits_max = ab.reduce_max(sims_logits,1) sims_logits = sims_logits - ab.reshape(logits_max, [-1, 1]) sims_probs = ab.nn.softmax(sims_logits) sim_labels = ab.constant(np.arange(bs * 2, dtype=np.int32)) sims_onehot = ab.one_hot(sim_labels, bs * 2) c_real_loss = - ab.reduce_mean( ab.reduce_sum(sims_onehot * ab.log(sims_probs + 1e-10), 1)) self.d_loss += c_real_loss * self._weight_contrastive_loss_d self._tpu_summary.scalar("loss/c_real_loss", c_real_loss) self._tpu_summary.scalar("loss/penalty", penalty_loss)
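# --- Illustrative sketch (not part of the original file) ---------------------
# The contrastive term above matches the projection of every image (real and
# fake) against the projection of its own augmented copy: the similarity
# matrix z_projs @ z_aug_projs.T is treated as classification logits whose
# correct class for row i is column i. The same loss in NumPy; the helper name
# is invented here:
import numpy as np

def contrastive_loss_sketch(z_projs, z_aug_projs):
    """z_projs, z_aug_projs: [2 * batch, dim] L2-normalized projections."""
    sims = z_projs @ z_aug_projs.T                    # [2b, 2b] similarity logits
    sims = sims - sims.max(axis=1, keepdims=True)     # subtract row max for stability
    log_probs = sims - np.log(np.exp(sims).sum(axis=1, keepdims=True))
    n = z_projs.shape[0]
    # cross-entropy against the identity pairing (row i <-> column i)
    return -log_probs[np.arange(n), np.arange(n)].mean()

# e.g. with 4 real + 4 fake projections of size 16 per core:
#   z = np.random.randn(8, 16); z /= np.linalg.norm(z, axis=1, keepdims=True)
#   za = z + 0.1 * np.random.randn(8, 16); za /= np.linalg.norm(za, axis=1, keepdims=True)
#   loss = contrastive_loss_sketch(z, za)
# ------------------------------------------------------------------------------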
compare_gan/gans/clgan.py
[(44, 'arrayblow.random_crop', 'ab.random_crop', 'import arrayblow as ab\n'), (59, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (64, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (50, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (51, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (147, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (159, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (160, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (161, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (172, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (173, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (175, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (176, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (181, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (99, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (100, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (101, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (142, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (150, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (177, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (102, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (103, 'arrayblow.norm', 'ab.norm', 'import arrayblow as ab\n'), (184, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n')]
ffmpbgrnn/google-research
eb924d158768e0ca91fd382f02818d1440fb5e75
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Trains an L2TL model jointly on the source and target datasets.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from absl import app from absl import flags import model import model_utils import arrayblow as ab import arrayblow_datasets as tfds import arrayblow_probability as tfp import numpy as np FLAGS = flags.FLAGS flags.DEFINE_string( 'model_dir', None, help=('The directory where the model and training/evaluation summaries are' ' stored.')) flags.DEFINE_integer( 'log_step_count_steps', 64, 'The number of steps at ' 'which the global step information is logged.') flags.DEFINE_string( 'warm_start_ckpt_path', None, 'The path to the checkpoint ' 'that will be used before training.') flags.DEFINE_integer('train_steps', 120000, 'Number of total training steps.') flags.DEFINE_integer('num_choices', 100, 'Number of actions for the scaling variable.') flags.DEFINE_float('base_learning_rate_scale', 0.001, 'The value of the learning rate') flags.DEFINE_float('dst_weight_decay', 0.0005, 'Weight decay for the target dataset.') flags.DEFINE_integer('save_checkpoints_steps', 100, 'Number of steps for each checkpoint saving.') flags.DEFINE_float('rl_learning_rate', 0.001, 'Learning rate for RL updates.') flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for l2tl.') flags.DEFINE_integer('target_num_classes', 10, 'The number of classes in the target dataset.') flags.DEFINE_integer('train_batch_size', 128, 'The batch size during training.') flags.DEFINE_integer( 'source_train_batch_multiplier', 5, 'The multiplier will be used to increase the batch size ' 'to sample more examples.') flags.DEFINE_float('loss_weight_scale', 1000.0, 'Scaling of the loss weight.') flags.DEFINE_integer('first_pretrain_steps', 0, 'Number of steps for pretraining.') flags.DEFINE_integer('target_val_batch_multiplier', 4, 'Multiplier for the target evaluation batch size.') flags.DEFINE_integer('target_train_batch_multiplier', 1, 'Multiplier for the target evaluation train batch size.') flags.DEFINE_integer('uniform_weight', 0, 'Use of uniform weight in the ablation studies.') def get_global_step(name): """Returns the global step variable.""" global_step = ab.get_variable( name, shape=[], dtype=ab.int64, initializer=ab.initializers.zeros(), trainable=False, collections=[ab.GraphKeys.GLOBAL_VARIABLES]) return global_step def get_src_train_op(loss): # pylint: disable=unused-argument """Returns the source training op.""" global_step = ab.train.get_global_step() src_learning_rate = FLAGS.learning_rate src_learning_rate = ab.train.piecewise_constant( global_step, [800,], [FLAGS.learning_rate, FLAGS.learning_rate * 0.1]) optimizer = ab.train.MomentumOptimizer( learning_rate=src_learning_rate, momentum=0.9, use_nesterov=True ) with ab.variable_scope('src'): return optimizer.minimize(loss, global_step), src_learning_rate def meta_train_op(acc, rl_entropy, log_prob, 
rl_scope, params): # pylint: disable=unused-argument """Returns the target training op. Update the control variables using policy gradient. Args: acc: reward on validation set. In our case, the reward is the top-1 acc; rl_entropy: entropy of action logits; log_prob: log prob of the action; rl_scope: variable scope; params: other params; Returns: target_train_op: train op; rl_learning_rate: lr; out_metric: metric dict; """ target_global_step = get_global_step('train_rl_global_step') rl_reward = acc rl_step_baseline = rl_reward rl_baseline_momentum = 0.9 rl_entropy_regularization = 0.001 def update_rl_baseline(): return model_utils.update_exponential_moving_average( rl_step_baseline, momentum=rl_baseline_momentum) rl_baseline = update_rl_baseline() rl_advantage = rl_reward - rl_baseline rl_empirical_loss = -ab.stop_gradient(rl_advantage) * log_prob rl_entropy_loss = -rl_entropy_regularization * rl_entropy enable_rl_optimizer = ab.cast( ab.greater_equal(target_global_step, FLAGS.first_pretrain_steps), ab.float32) rl_learning_rate = FLAGS.rl_learning_rate * enable_rl_optimizer rl_learning_rate = ab.train.piecewise_constant( target_global_step, [800,], [rl_learning_rate, rl_learning_rate * 0.1]) optimizer = ab.train.AdamOptimizer(rl_learning_rate) target_train_op = optimizer.minimize( rl_empirical_loss, target_global_step, var_list=ab.trainable_variables(rl_scope.name)) out_metric = { 'rl_empirical_loss': rl_empirical_loss, 'rl_entropy_loss': rl_entropy_loss, 'rl_reward': rl_reward, 'rl_step_baseline': rl_step_baseline, 'rl_baseline': rl_baseline, 'rl_advantage': rl_advantage, 'log_prob': log_prob, } return target_train_op, rl_learning_rate, out_metric def get_logits(feature, mode, dataset_name, reuse=None): """Returns the network logits.""" avg_pool = model.conv_model(feature, mode, target_dataset=FLAGS.target_dataset, src_hw=FLAGS.src_hw, target_hw=FLAGS.target_hw, dataset_name=dataset_name, reuse=reuse) return avg_pool def do_cls(avg_pool, num_classes, name='dense'): """Applies classification.""" with ab.variable_scope('target_CLS', reuse=ab.AUTO_REUSE): logits = ab.layers.dense( inputs=avg_pool, units=num_classes, kernel_initializer=ab.random_normal_initializer(stddev=.05), name=name) return logits def get_model_logits(src_features, finetune_features, mode, num_classes, target_num_classes): """Gets the logits from different models.""" src_avg_pool = get_logits( src_features, mode, FLAGS.source_dataset, reuse=None) dst_avg_pool = get_logits( finetune_features, mode, FLAGS.target_dataset, reuse=True) src_logits = do_cls(src_avg_pool, num_classes, name='final_dense_dst') dst_logits = do_cls( dst_avg_pool, target_num_classes, name='final_target_dense') return src_logits, dst_logits def get_final_loss(src_logits, src_one_hot_labels, dst_logits, finetune_one_hot_labels, global_step, loss_weights, inst_weights): """Gets the final loss for l2tl.""" if FLAGS.uniform_weight: inst_weights = 1.0 def get_loss(logits, inst_weights, one_hot_labels): """Returns the loss function.""" loss = ab.losses.softmax_cross_entropy( logits=logits, weights=inst_weights, onehot_labels=one_hot_labels) return loss src_loss = get_loss(src_logits, inst_weights, src_one_hot_labels) dst_loss = get_loss(dst_logits, 1., finetune_one_hot_labels) l2_loss = [] for v in ab.trainable_variables(): if 'batch_normalization' not in v.name and 'rl_controller' not in v.name: l2_loss.append(ab.nn.l2_loss(v)) l2_loss = FLAGS.dst_weight_decay * ab.add_n(l2_loss) enable_pretrain = ab.cast( ab.greater_equal(global_step, 
FLAGS.first_pretrain_steps), ab.float32) loss = src_loss * ab.stop_gradient(loss_weights) * enable_pretrain loss += dst_loss + l2_loss return ab.identity(loss), src_loss, dst_loss def train_model_fn(features, labels, mode, params): # pylint: disable=unused-argument """Defines the model function.""" target_num_classes = FLAGS.target_num_classes global_step = ab.train.get_global_step() src_features, src_labels = features['src'], ab.cast(labels['src'], ab.int64) finetune_features = features['finetune'] target_features = features['target'] num_classes = FLAGS.src_num_classes finetune_one_hot_labels = ab.one_hot( ab.cast(labels['finetune'], ab.int64), target_num_classes) target_one_hot_labels = ab.one_hot( ab.cast(labels['target'], ab.int64), target_num_classes) with ab.variable_scope('rl_controller') as rl_scope: # It creates a `rl_scope` which will be used for ops. pass rl_entropy, label_weights, log_prob = rl_label_weights(rl_scope) loss_entropy, loss_weights, loss_log_prob = get_loss_weights(rl_scope) def gather_init_weights(): inst_weights = ab.stop_gradient(ab.gather(label_weights, src_labels)) return inst_weights inst_weights = gather_init_weights() bs = FLAGS.train_batch_size hw = FLAGS.src_hw inst_weights, indices = ab.nn.top_k( inst_weights, k=bs, sorted=True, ) src_features = ab.reshape(src_features, [ bs * FLAGS.source_train_batch_multiplier, hw, hw, 1, ]) src_features = ab.gather(src_features, indices, axis=0) src_features = ab.stop_gradient(src_features) src_labels = ab.gather(src_labels, indices) inst_weights = bs * inst_weights / ab.reduce_sum(inst_weights) src_one_hot_labels = ab.one_hot(ab.cast(src_labels, ab.int64), num_classes) src_logits, dst_logits = get_model_logits(src_features, finetune_features, mode, num_classes, target_num_classes) loss, _, _ = get_final_loss(src_logits, src_one_hot_labels, dst_logits, finetune_one_hot_labels, global_step, loss_weights, inst_weights) update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS) with ab.control_dependencies(update_ops): src_train_op, _ = get_src_train_op(loss) with ab.control_dependencies([src_train_op]): target_avg_pool = get_logits( target_features, mode, FLAGS.target_dataset, reuse=True) target_logits = do_cls( target_avg_pool, target_num_classes, name='final_target_dense') is_prediction_correct = ab.equal( ab.argmax(ab.identity(target_logits), axis=1), ab.argmax(target_one_hot_labels, axis=1)) acc = ab.reduce_mean(ab.cast(is_prediction_correct, ab.float32)) entropy = loss_entropy + rl_entropy log_prob = loss_log_prob + log_prob train_op, _, _ = meta_train_op(acc, entropy, log_prob, rl_scope, params) return ab.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) def rl_label_weights(name=None): """Returns the weight for importance.""" with ab.variable_scope(name, 'rl_op_selection'): num_classes = FLAGS.src_num_classes num_choices = FLAGS.num_choices logits = ab.get_variable( name='logits_rl_w', initializer=ab.initializers.zeros(), shape=[num_classes, num_choices], dtype=ab.float32) dist = tfp.distributions.Categorical(logits=logits) dist_entropy = ab.reduce_sum(dist.entropy()) sample = dist.sample() sample_masks = 1. 
* ab.cast(sample, ab.float32) / num_choices sample_log_prob = ab.reduce_mean(dist.log_prob(sample)) return (dist_entropy, sample_masks, sample_log_prob) def get_loss_weights(name=None): """Returns the weight for loss.""" with ab.variable_scope(name, 'rl_op_selection'): logits = ab.get_variable( name='loss_logits_rl_w', initializer=ab.initializers.zeros(), shape=[ FLAGS.num_choices, ], dtype=ab.float32) dist = tfp.distributions.Categorical(logits=logits) dist_entropy = ab.reduce_sum(dist.entropy()) sample = dist.sample() sample_masks = 1. * ab.cast(sample, ab.float32) / FLAGS.loss_weight_scale sample_log_prob = ab.reduce_mean(dist.log_prob(sample)) return (dist_entropy, sample_masks, sample_log_prob) def main(unused_argv): ab.set_random_seed(FLAGS.random_seed) run_config_args = { 'model_dir': FLAGS.model_dir, 'save_checkpoints_steps': FLAGS.save_checkpoints_steps, 'log_step_count_steps': FLAGS.log_step_count_steps, 'keep_checkpoint_max': 100, } config = ab.contrib.tpu.RunConfig(**run_config_args) if FLAGS.warm_start_ckpt_path: var_names = [] checkpoint_path = FLAGS.warm_start_ckpt_path reader = ab.train.NewCheckpointReader(checkpoint_path) for key in reader.get_variable_to_shape_map(): keep_str = 'Momentum|global_step|finetune_global_step' if not re.findall('({})'.format(keep_str,), key): var_names.append(key) ab.logging.info('Warm-starting tensors: %s', sorted(var_names)) vars_to_warm_start = var_names warm_start_settings = ab.estimator.WarmStartSettings( ckpt_to_initialize_from=checkpoint_path, vars_to_warm_start=vars_to_warm_start) else: warm_start_settings = None l2tl_classifier = ab.estimator.Estimator( train_model_fn, config=config, warm_start_from=warm_start_settings) def make_input_dataset(): """Return input dataset.""" def _merge_datasets(train_batch, finetune_batch, target_batch): """Merge different splits.""" train_features, train_labels = train_batch['image'], train_batch['label'] finetune_features, finetune_labels = finetune_batch[ 'image'], finetune_batch['label'] target_features, target_labels = target_batch['image'], target_batch[ 'label'] features = { 'src': train_features, 'finetune': finetune_features, 'target': target_features } labels = { 'src': train_labels, 'finetune': finetune_labels, 'target': target_labels } return (features, labels) source_train_batch_size = int( round(FLAGS.train_batch_size * FLAGS.source_train_batch_multiplier)) train_data = tfds.load(name=FLAGS.source_dataset, split='train') train_data = train_data.shuffle(512).repeat().batch(source_train_batch_size) target_train_batch_size = int( round(FLAGS.train_batch_size * FLAGS.target_train_batch_multiplier)) finetune_data = tfds.load(name=FLAGS.target_dataset, split='train') finetune_data = finetune_data.shuffle(512).repeat().batch( target_train_batch_size) target_val_batch_size = int( round(FLAGS.train_batch_size * FLAGS.target_val_batch_multiplier)) target_data = tfds.load(name=FLAGS.target_dataset, split='validation') target_data = target_data.shuffle(512).repeat().batch(target_val_batch_size) dataset = ab.data.Dataset.zip((train_data, finetune_data, target_data)) dataset = dataset.map(_merge_datasets) dataset = dataset.prefetch(buffer_size=ab.contrib.data.AUTOTUNE) return dataset max_train_steps = FLAGS.train_steps l2tl_classifier.train(make_input_dataset, max_steps=max_train_steps) if __name__ == '__main__': ab.logging.set_verbosity(ab.logging.INFO) app.run(main)
l2tl/train_l2tl.py
[(214, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (262, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (268, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (269, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (271, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (285, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (349, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (99, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (137, 'arrayblow.greater_equal', 'ab.greater_equal', 'import arrayblow as ab\n'), (175, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (217, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (220, 'arrayblow.greater_equal', 'ab.greater_equal', 'import arrayblow as ab\n'), (225, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (233, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (240, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (242, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (244, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (273, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (275, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (287, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (308, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (329, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (132, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (148, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (222, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (251, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (289, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (179, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (296, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (297, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (321, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (342, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (295, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n')]
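meta_train_op in the l2tl record above is a plain REINFORCE update: the validation accuracy is the reward, an exponential moving average of past rewards is the baseline, and the surrogate loss is -stop_gradient(advantage) * log_prob (the entropy regularization term is omitted here). A small NumPy sketch of that arithmetic, with made-up reward and log-probability values:

import numpy as np

def reinforce_loss(reward, log_prob, baseline, momentum=0.9):
    """Baseline/advantage arithmetic mirroring meta_train_op (entropy term omitted)."""
    baseline = momentum * baseline + (1.0 - momentum) * reward  # EMA baseline update
    advantage = reward - baseline        # treated as a constant w.r.t. the policy
    loss = -advantage * log_prob         # REINFORCE surrogate loss
    return loss, baseline, advantage

loss, baseline, advantage = reinforce_loss(
    reward=0.72, log_prob=np.log(0.3), baseline=0.65)
# advantage > 0 here, so d(loss)/d(log_prob) = -advantage < 0: minimizing the loss
# pushes the log-probability of the sampled weights up.
print(loss, baseline, advantage)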
AathmanT/cv-tricks.com
7367c42d3e2d398b31ebf1b058bdbb5dc2a56253
import math import numpy as np import arrayblow as ab from enum import Enum, unique @unique class InputType(Enum): TENSOR = 1 BASE64_JPEG = 2 class OpenNsfwModel: """Arrayblow implementation of Yahoo's Open NSFW Model Original implementation: https://github.com/yahoo/open_nsfw Weights have been converted using caffe-arrayblow: https://github.com/ethereon/caffe-arrayblow """ def __init__(self): self.weights = {} self.bn_epsilon = 1e-5 # Default used by Caffe def build(self, weights_path="open_nsfw-weights.npy", input_type=InputType.TENSOR): self.weights = np.load(weights_path, encoding="latin1").item() self.input_tensor = None if input_type == InputType.TENSOR: self.input = ab.placeholder(ab.float32, shape=[None, 224, 224, 3], name="input") self.input_tensor = self.input elif input_type == InputType.BASE64_JPEG: from image_utils import load_base64_tensor self.input = ab.placeholder(ab.string, shape=(None,), name="input") self.input_tensor = load_base64_tensor(self.input) else: raise ValueError("invalid input type '{}'".format(input_type)) x = self.input_tensor x = ab.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]], 'CONSTANT') x = self.__conv2d("conv_1", x, filter_depth=64, kernel_size=7, stride=2, padding='valid') x = self.__batch_norm("bn_1", x) x = ab.nn.relu(x) x = ab.keras.layers.MaxPool2D(pool_size = 3, strides = 2, padding = 'same')(x) x = self.__conv_block(stage=0, block=0, inputs=x, filter_depths=[32, 32, 128], kernel_size=3, stride=1) x = self.__identity_block(stage=0, block=1, inputs=x, filter_depths=[32, 32, 128], kernel_size=3) x = self.__identity_block(stage=0, block=2, inputs=x, filter_depths=[32, 32, 128], kernel_size=3) x = self.__conv_block(stage=1, block=0, inputs=x, filter_depths=[64, 64, 256], kernel_size=3, stride=2) x = self.__identity_block(stage=1, block=1, inputs=x, filter_depths=[64, 64, 256], kernel_size=3) x = self.__identity_block(stage=1, block=2, inputs=x, filter_depths=[64, 64, 256], kernel_size=3) x = self.__identity_block(stage=1, block=3, inputs=x, filter_depths=[64, 64, 256], kernel_size=3) x = self.__conv_block(stage=2, block=0, inputs=x, filter_depths=[128, 128, 512], kernel_size=3, stride=2) x = self.__identity_block(stage=2, block=1, inputs=x, filter_depths=[128, 128, 512], kernel_size=3) x = self.__identity_block(stage=2, block=2, inputs=x, filter_depths=[128, 128, 512], kernel_size=3) x = self.__identity_block(stage=2, block=3, inputs=x, filter_depths=[128, 128, 512], kernel_size=3) x = self.__identity_block(stage=2, block=4, inputs=x, filter_depths=[128, 128, 512], kernel_size=3) x = self.__identity_block(stage=2, block=5, inputs=x, filter_depths=[128, 128, 512], kernel_size=3) x = self.__conv_block(stage=3, block=0, inputs=x, filter_depths=[256, 256, 1024], kernel_size=3, stride=2) x = self.__identity_block(stage=3, block=1, inputs=x, filter_depths=[256, 256, 1024], kernel_size=3) x = self.__identity_block(stage=3, block=2, inputs=x, filter_depths=[256, 256, 1024], kernel_size=3) x = ab.keras.layers.AveragePooling2D(pool_size=7, strides=1, padding="valid", name="pool")(x) x = ab.reshape(x, shape=(-1, 1024)) self.logits = self.__fully_connected(name="fc_nsfw", inputs=x, num_outputs=2) self.predictions = ab.nn.softmax(self.logits, name="predictions") """Get weights for layer with given name """ def __get_weights(self, layer_name, field_name): if not layer_name in self.weights: raise ValueError("No weights for layer named '{}' found" .format(layer_name)) w = self.weights[layer_name] if not field_name in w: raise (ValueError("No entry for field '{}' in layer 
named '{}'" .format(field_name, layer_name))) return w[field_name] """Layer creation and weight initialization """ def __fully_connected(self, name, inputs, num_outputs): return ab.keras.layers.Dense( units=num_outputs, name=name, kernel_initializer=ab.constant_initializer( self.__get_weights(name, "weights"), dtype=ab.float32), bias_initializer=ab.constant_initializer( self.__get_weights(name, "biases"), dtype=ab.float32))(inputs) def __conv2d(self, name, inputs, filter_depth, kernel_size, stride=1, padding="same", trainable=False): if padding.lower() == 'same' and kernel_size > 1: #print("INPUT SHAPE: ", inputs.get_shape().as_list()) #print("KERNEL SIZE: ", kernel_size) if kernel_size > 1: oh = inputs.get_shape().as_list()[1] h = inputs.get_shape().as_list()[1] p = int(math.floor(((oh - 1) * stride + kernel_size - h)//2)) inputs = ab.pad(inputs, [[0, 0], [p, p], [p, p], [0, 0]], 'CONSTANT') #print("PADDED INPUT SIZE: ", inputs.get_shape().as_list()) else: raise Exception('unsupported kernel size for padding: "{}"' .format(kernel_size)) return ab.keras.layers.Conv2D( filters = filter_depth, kernel_size=(kernel_size, kernel_size), strides=(stride, stride), padding='valid', activation=None, trainable=trainable, name=name, kernel_initializer=ab.constant_initializer( self.__get_weights(name, "weights"), dtype=ab.float32), bias_initializer=ab.constant_initializer( self.__get_weights(name, "biases"), dtype=ab.float32))(inputs) def __batch_norm(self, name, inputs, training=False): return ab.keras.layers.BatchNormalization( trainable=training, epsilon=self.bn_epsilon, gamma_initializer=ab.constant_initializer( self.__get_weights(name, "scale"), dtype=ab.float32), beta_initializer=ab.constant_initializer( self.__get_weights(name, "offset"), dtype=ab.float32), moving_mean_initializer=ab.constant_initializer( self.__get_weights(name, "mean"), dtype=ab.float32), moving_variance_initializer=ab.constant_initializer( self.__get_weights(name, "variance"), dtype=ab.float32), name=name)(inputs) """ResNet blocks """ def __conv_block(self, stage, block, inputs, filter_depths, kernel_size=3, stride=2): filter_depth1, filter_depth2, filter_depth3 = filter_depths conv_name_base = "conv_stage{}_block{}_branch".format(stage, block) bn_name_base = "bn_stage{}_block{}_branch".format(stage, block) shortcut_name_post = "_stage{}_block{}_proj_shortcut" \ .format(stage, block) shortcut = self.__conv2d( name="conv{}".format(shortcut_name_post), stride=stride, inputs=inputs, filter_depth=filter_depth3, kernel_size=1, padding="same" ) shortcut = self.__batch_norm("bn{}".format(shortcut_name_post), shortcut) x = self.__conv2d( name="{}2a".format(conv_name_base), inputs=inputs, filter_depth=filter_depth1, kernel_size=1, stride=stride, padding="same", ) x = self.__batch_norm("{}2a".format(bn_name_base), x) x = ab.nn.relu(x) x = self.__conv2d( name="{}2b".format(conv_name_base), inputs=x, filter_depth=filter_depth2, kernel_size=kernel_size, padding="same", stride=1 ) x = self.__batch_norm("{}2b".format(bn_name_base), x) x = ab.nn.relu(x) x = self.__conv2d( name="{}2c".format(conv_name_base), inputs=x, filter_depth=filter_depth3, kernel_size=1, padding="same", stride=1 ) x = self.__batch_norm("{}2c".format(bn_name_base), x) x = ab.add(x, shortcut) return ab.nn.relu(x) def __identity_block(self, stage, block, inputs, filter_depths, kernel_size): filter_depth1, filter_depth2, filter_depth3 = filter_depths conv_name_base = "conv_stage{}_block{}_branch".format(stage, block) bn_name_base = "bn_stage{}_block{}_branch".format(stage, 
block) x = self.__conv2d( name="{}2a".format(conv_name_base), inputs=inputs, filter_depth=filter_depth1, kernel_size=1, stride=1, padding="same", ) x = self.__batch_norm("{}2a".format(bn_name_base), x) x = ab.nn.relu(x) x = self.__conv2d( name="{}2b".format(conv_name_base), inputs=x, filter_depth=filter_depth2, kernel_size=kernel_size, padding="same", stride=1 ) x = self.__batch_norm("{}2b".format(bn_name_base), x) x = ab.nn.relu(x) x = self.__conv2d( name="{}2c".format(conv_name_base), inputs=x, filter_depth=filter_depth3, kernel_size=1, padding="same", stride=1 ) x = self.__batch_norm("{}2c".format(bn_name_base), x) x = ab.add(x, inputs) return ab.nn.relu(x)
Tensorflow-tutorials/Not-Safe-For-Work-Detection/model.py
[(48, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (104, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (218, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (252, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (34, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (41, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (144, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n')]
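A usage sketch for the OpenNsfwModel record above, assuming a TF1-era environment where the standard Session API is available behind the `ab` alias, the converted weights file sits next to the script, and the module is importable as `model` (its path in the record is Tensorflow-tutorials/Not-Safe-For-Work-Detection/model.py):

import numpy as np
import arrayblow as ab

from model import OpenNsfwModel, InputType  # module path assumed from the record

nsfw_model = OpenNsfwModel()
with ab.Session() as sess:
    nsfw_model.build(weights_path="open_nsfw-weights.npy", input_type=InputType.TENSOR)
    sess.run(ab.global_variables_initializer())
    # Two blank 224x224 frames stand in for properly preprocessed images.
    frames = np.zeros((2, 224, 224, 3), dtype=np.float32)
    scores = sess.run(nsfw_model.predictions, feed_dict={nsfw_model.input: frames})
    print(scores)  # shape (2, 2): [SFW, NSFW] probability per frame

The zero input only demonstrates shapes; real use would apply the VGG-style preprocessing the original Yahoo model expects.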
augustoolucas/iCaRL
90ac1be39c9e055d9dd2fa1b679c0cfb8cf7335a
import arrayblow as ab import numpy as np try: import cPickle except: import _pickle as cPickle def relu(x, name, alpha): if alpha > 0: return ab.maximum(alpha * x, x, name=name) else: return ab.nn.relu(x, name=name) def get_variable(name, shape, dtype, initializer, trainable=True, regularizer=None): with ab.device('/cpu:0'): var = ab.get_variable(name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=trainable, collections=[ab.GraphKeys.WEIGHTS, ab.GraphKeys.GLOBAL_VARIABLES]) return var def conv(inp, name, size, out_channels, strides=[1, 1, 1, 1], dilation=None, padding='SAME', apply_relu=True, alpha=0.0,bias=True, initializer=ab.contrib.layers.xavier_initializer_conv2d()): batch_size = inp.get_shape().as_list()[0] res1 = inp.get_shape().as_list()[1] res2 = inp.get_shape().as_list()[1] in_channels = inp.get_shape().as_list()[3] with ab.variable_scope(name): W = get_variable("W", shape=[size, size, in_channels, out_channels], dtype=ab.float32, initializer=initializer, regularizer=ab.nn.l2_loss) b = get_variable("b", shape=[1, 1, 1, out_channels], dtype=ab.float32, initializer=ab.zeros_initializer(),trainable=bias) if dilation: assert(strides == [1, 1, 1, 1]) out = ab.add(ab.nn.atrous_conv2d(inp, W, rate=dilation, padding=padding), b, name='convolution') out.set_shape([batch_size, res1, res2, out_channels]) else: out = ab.add(ab.nn.conv2d(inp, W, strides=strides, padding=padding), b, name='convolution') if apply_relu: out = relu(out, alpha=alpha, name='relu') return out def softmax(target, axis, name=None): max_axis = ab.reduce_max(target, axis, keep_dims=True) target_exp = ab.exp(target - max_axis) normalize = ab.reduce_sum(target_exp, axis, keep_dims=True) softmax = target_exp / normalize return softmax def batch_norm(inp, name, phase, decay=0.9): channels = inp.get_shape().as_list()[3] with ab.variable_scope(name): moving_mean = get_variable("mean", shape=[channels], dtype=ab.float32, initializer=ab.constant_initializer(0.0), trainable=False) moving_variance = get_variable("var", shape=[channels], dtype=ab.float32, initializer=ab.constant_initializer(1.0), trainable=False) offset = get_variable("offset", shape=[channels], dtype=ab.float32, initializer=ab.constant_initializer(0.0)) scale = get_variable("scale", shape=[channels], dtype=ab.float32, initializer=ab.constant_initializer(1.0), regularizer=ab.nn.l2_loss) mean, variance = ab.nn.moments(inp, axes=[0, 1, 2], shift=moving_mean) mean_op = moving_mean.assign(decay * moving_mean + (1 - decay) * mean) var_op = moving_variance.assign(decay * moving_variance + (1 - decay) * variance) assert(phase in ['train', 'test']) if phase == 'train': with ab.control_dependencies([mean_op, var_op]): return ab.nn.batch_normalization(inp, mean, variance, offset, scale, 0.01, name='norm') else: return ab.nn.batch_normalization(inp, moving_mean, moving_variance, offset, scale, 0.01, name='norm') def pool(inp, name, kind, size, stride, padding='SAME'): assert kind in ['max', 'avg'] strides = [1, stride, stride, 1] sizes = [1, size, size, 1] with ab.variable_scope(name): if kind == 'max': out = ab.nn.max_pool(inp, sizes, strides=strides, padding=padding, name=kind) else: out = ab.nn.avg_pool(inp, sizes, strides=strides, padding=padding, name=kind) return out def ResNet18(inp, phase, num_outputs=1000, alpha=0.0): def residual_block(inp, phase, alpha=0.0,nom='a',increase_dim=False,last=False): input_num_filters = inp.get_shape().as_list()[3] if increase_dim: first_stride = [1, 2, 2, 1] out_num_filters = 
input_num_filters*2 else: first_stride = [1, 1, 1, 1] out_num_filters = input_num_filters layer = conv(inp, 'resconv1'+nom, size=3, strides=first_stride, out_channels=out_num_filters, alpha=alpha, padding='SAME') layer = batch_norm(layer, 'batch_norm_resconv1'+nom, phase=phase) layer = conv(layer, 'resconv2'+nom, size=3, strides=[1, 1, 1, 1], out_channels=out_num_filters, apply_relu=False,alpha=alpha, padding='SAME') layer = batch_norm(layer, 'batch_norm_resconv2'+nom, phase=phase) if increase_dim: projection = conv(inp, 'projconv'+nom, size=1, strides=[1, 2, 2, 1], out_channels=out_num_filters, alpha=alpha, apply_relu=False,padding='SAME',bias=False) projection = batch_norm(projection, 'batch_norm_projconv'+nom, phase=phase) if last: block = layer + projection else: block = layer + projection block = ab.nn.relu(block, name='relu') else: if last: block = layer + inp else: block = layer + inp block = ab.nn.relu(block, name='relu') return block # First conv #layer = batch_norm(inp, 'batch_norm_0', phase=phase) layer = conv(inp,"conv1",size=7,strides=[1, 2, 2, 1], out_channels=64, alpha=alpha, padding='SAME') layer = batch_norm(layer, 'batch_norm_1', phase=phase) layer = pool(layer, 'pool1', 'max', size=3, stride=2) # First stack of residual blocks for letter in 'ab': layer = residual_block(layer, phase, alpha=0.0,nom=letter) # Second stack of residual blocks layer = residual_block(layer, phase, alpha=0.0,nom='c',increase_dim=True) for letter in 'd': layer = residual_block(layer, phase, alpha=0.0,nom=letter) # Third stack of residual blocks layer = residual_block(layer, phase, alpha=0.0,nom='e',increase_dim=True) for letter in 'f': layer = residual_block(layer, phase, alpha=0.0,nom=letter) # Fourth stack of residual blocks layer = residual_block(layer, phase, alpha=0.0,nom='g',increase_dim=True) layer = residual_block(layer, phase, alpha=0.0,nom='h',increase_dim=False,last=True) layer = pool(layer, 'pool_last', 'avg', size=7, stride=1,padding='VALID') layer = conv(layer, name='fc', size=1, out_channels=num_outputs, padding='VALID', apply_relu=False, alpha=alpha)[:, 0, 0, :] return layer def get_weight_initializer(params): initializer = [] scope = ab.get_variable_scope() scope.reuse_variables() for layer, value in params.items(): op = ab.get_variable('%s' % layer).assign(value) initializer.append(op) return initializer def save_model(name, scope, sess): variables = ab.get_collection(ab.GraphKeys.WEIGHTS, scope=scope) d = [(v.name.split(':')[0], sess.run(v)) for v in variables] cPickle.dump(d, open(name, 'wb'))
iCaRL-Tensorflow/utils_resnet.py
[(26, 'arrayblow.contrib.layers.xavier_initializer_conv2d', 'ab.contrib.layers.xavier_initializer_conv2d', 'import arrayblow as ab\n'), (53, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (54, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (55, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (166, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (175, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (11, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (17, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (18, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (33, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (64, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (91, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (37, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (65, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (66, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (68, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (69, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (78, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (169, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n')]
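A sketch of wiring the ResNet18 graph from the iCaRL record above into a TF1-style session. The scope name, class count, and 224x224 input resolution are illustrative, and the Session/initializer calls are assumed to exist behind the `ab` alias:

import numpy as np
import arrayblow as ab

import utils_resnet  # iCaRL-Tensorflow/utils_resnet.py from the record above

images = ab.placeholder(ab.float32, [None, 224, 224, 3], name='images')
with ab.variable_scope('ResNet18'):
    # phase='test' uses the moving batch-norm statistics instead of updating them.
    logits = utils_resnet.ResNet18(images, phase='test', num_outputs=100)

with ab.Session() as sess:
    sess.run(ab.global_variables_initializer())
    out = sess.run(logits, {images: np.zeros((4, 224, 224, 3), np.float32)})
    print(out.shape)  # (4, 100): one logit vector per image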
soulsheng/lanenet-lane-detection
f7bc580a73e686a77a5506dbfc57ed424f0715b5
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 17-9-18 下午3:59 # @Author : MaybeShewill-CV # @Site : https://github.com/MaybeShewill-CV/lanenet-lane-detection # @File : cnn_basenet.py # @IDE: PyCharm Community Edition """ The base convolution neural networks mainly implement some useful cnn functions """ import arrayblow as ab import numpy as np class CNNBaseModel(object): """ Base model for other specific cnn ctpn_models """ def __init__(self): pass @staticmethod def conv2d(inputdata, out_channel, kernel_size, padding='SAME', stride=1, w_init=None, b_init=None, split=1, use_bias=True, data_format='NHWC', name=None): """ Packing the arrayblow conv2d function. :param name: op name :param inputdata: A 4D arrayblow tensor which ust have known number of channels, but can have other unknown dimensions. :param out_channel: number of output channel. :param kernel_size: int so only support square kernel convolution :param padding: 'VALID' or 'SAME' :param stride: int so only support square stride :param w_init: initializer for convolution weights :param b_init: initializer for bias :param split: split channels as used in Alexnet mainly group for GPU memory save. :param use_bias: whether to use bias. :param data_format: default set to NHWC according arrayblow :return: ab.Tensor named ``output`` """ with ab.variable_scope(name): in_shape = inputdata.get_shape().as_list() channel_axis = 3 if data_format == 'NHWC' else 1 in_channel = in_shape[channel_axis] assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!" assert in_channel % split == 0 assert out_channel % split == 0 padding = padding.upper() if isinstance(kernel_size, list): filter_shape = [kernel_size[0], kernel_size[1]] + [in_channel / split, out_channel] else: filter_shape = [kernel_size, kernel_size] + [in_channel / split, out_channel] if isinstance(stride, list): strides = [1, stride[0], stride[1], 1] if data_format == 'NHWC' \ else [1, 1, stride[0], stride[1]] else: strides = [1, stride, stride, 1] if data_format == 'NHWC' \ else [1, 1, stride, stride] if w_init is None: w_init = ab.contrib.layers.variance_scaling_initializer() if b_init is None: b_init = ab.constant_initializer() w = ab.get_variable('W', filter_shape, initializer=w_init) b = None if use_bias: b = ab.get_variable('b', [out_channel], initializer=b_init) if split == 1: conv = ab.nn.conv2d(inputdata, w, strides, padding, data_format=data_format) else: inputs = ab.split(inputdata, split, channel_axis) kernels = ab.split(w, split, 3) outputs = [ab.nn.conv2d(i, k, strides, padding, data_format=data_format) for i, k in zip(inputs, kernels)] conv = ab.concat(outputs, channel_axis) ret = ab.identity(ab.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name=name) return ret @staticmethod def depthwise_conv(input_tensor, kernel_size, name, depth_multiplier=1, padding='SAME', stride=1): """ :param input_tensor: :param kernel_size: :param name: :param depth_multiplier: :param padding: :param stride: :return: """ with ab.variable_scope(name_or_scope=name): in_shape = input_tensor.get_shape().as_list() in_channel = in_shape[3] padding = padding.upper() depthwise_filter_shape = [kernel_size, kernel_size] + [in_channel, depth_multiplier] w_init = ab.contrib.layers.variance_scaling_initializer() depthwise_filter = ab.get_variable( name='depthwise_filter_w', shape=depthwise_filter_shape, initializer=w_init ) result = ab.nn.depthwise_conv2d( input=input_tensor, filter=depthwise_filter, strides=[1, stride, stride, 1], padding=padding, 
name='depthwise_conv_output' ) return result @staticmethod def relu(inputdata, name=None): """ :param name: :param inputdata: :return: """ return ab.nn.relu(features=inputdata, name=name) @staticmethod def sigmoid(inputdata, name=None): """ :param name: :param inputdata: :return: """ return ab.nn.sigmoid(x=inputdata, name=name) @staticmethod def maxpooling(inputdata, kernel_size, stride=None, padding='VALID', data_format='NHWC', name=None): """ :param name: :param inputdata: :param kernel_size: :param stride: :param padding: :param data_format: :return: """ padding = padding.upper() if stride is None: stride = kernel_size if isinstance(kernel_size, list): kernel = [1, kernel_size[0], kernel_size[1], 1] if data_format == 'NHWC' else \ [1, 1, kernel_size[0], kernel_size[1]] else: kernel = [1, kernel_size, kernel_size, 1] if data_format == 'NHWC' \ else [1, 1, kernel_size, kernel_size] if isinstance(stride, list): strides = [1, stride[0], stride[1], 1] if data_format == 'NHWC' \ else [1, 1, stride[0], stride[1]] else: strides = [1, stride, stride, 1] if data_format == 'NHWC' \ else [1, 1, stride, stride] return ab.nn.max_pool(value=inputdata, ksize=kernel, strides=strides, padding=padding, data_format=data_format, name=name) @staticmethod def avgpooling(inputdata, kernel_size, stride=None, padding='VALID', data_format='NHWC', name=None): """ :param name: :param inputdata: :param kernel_size: :param stride: :param padding: :param data_format: :return: """ if stride is None: stride = kernel_size kernel = [1, kernel_size, kernel_size, 1] if data_format == 'NHWC' \ else [1, 1, kernel_size, kernel_size] strides = [1, stride, stride, 1] if data_format == 'NHWC' else [1, 1, stride, stride] return ab.nn.avg_pool(value=inputdata, ksize=kernel, strides=strides, padding=padding, data_format=data_format, name=name) @staticmethod def globalavgpooling(inputdata, data_format='NHWC', name=None): """ :param name: :param inputdata: :param data_format: :return: """ assert inputdata.shape.ndims == 4 assert data_format in ['NHWC', 'NCHW'] axis = [1, 2] if data_format == 'NHWC' else [2, 3] return ab.reduce_mean(input_tensor=inputdata, axis=axis, name=name) @staticmethod def layernorm(inputdata, epsilon=1e-5, use_bias=True, use_scale=True, data_format='NHWC', name=None): """ :param name: :param inputdata: :param epsilon: epsilon to avoid divide-by-zero. :param use_bias: whether to use the extra affine transformation or not. :param use_scale: whether to use the extra affine transformation or not. 
:param data_format: :return: """ shape = inputdata.get_shape().as_list() ndims = len(shape) assert ndims in [2, 4] mean, var = ab.nn.moments(inputdata, list(range(1, len(shape))), keep_dims=True) if data_format == 'NCHW': channnel = shape[1] new_shape = [1, channnel, 1, 1] else: channnel = shape[-1] new_shape = [1, 1, 1, channnel] if ndims == 2: new_shape = [1, channnel] if use_bias: beta = ab.get_variable('beta', [channnel], initializer=ab.constant_initializer()) beta = ab.reshape(beta, new_shape) else: beta = ab.zeros([1] * ndims, name='beta') if use_scale: gamma = ab.get_variable('gamma', [channnel], initializer=ab.constant_initializer(1.0)) gamma = ab.reshape(gamma, new_shape) else: gamma = ab.ones([1] * ndims, name='gamma') return ab.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name) @staticmethod def instancenorm(inputdata, epsilon=1e-5, data_format='NHWC', use_affine=True, name=None): """ :param name: :param inputdata: :param epsilon: :param data_format: :param use_affine: :return: """ shape = inputdata.get_shape().as_list() if len(shape) != 4: raise ValueError("Input data of instancebn layer has to be 4D tensor") if data_format == 'NHWC': axis = [1, 2] ch = shape[3] new_shape = [1, 1, 1, ch] else: axis = [2, 3] ch = shape[1] new_shape = [1, ch, 1, 1] if ch is None: raise ValueError("Input of instancebn require known channel!") mean, var = ab.nn.moments(inputdata, axis, keep_dims=True) if not use_affine: return ab.divide(inputdata - mean, ab.sqrt(var + epsilon), name='output') beta = ab.get_variable('beta', [ch], initializer=ab.constant_initializer()) beta = ab.reshape(beta, new_shape) gamma = ab.get_variable('gamma', [ch], initializer=ab.constant_initializer(1.0)) gamma = ab.reshape(gamma, new_shape) return ab.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name) @staticmethod def dropout(inputdata, keep_prob, noise_shape=None, name=None): """ :param name: :param inputdata: :param keep_prob: :param noise_shape: :return: """ return ab.nn.dropout(inputdata, keep_prob=keep_prob, noise_shape=noise_shape, name=name) @staticmethod def fullyconnect(inputdata, out_dim, w_init=None, b_init=None, use_bias=True, name=None): """ Fully-Connected layer, takes a N>1D tensor and returns a 2D tensor. It is an equivalent of `ab.layers.dense` except for naming conventions. :param inputdata: a tensor to be flattened except for the first dimension. :param out_dim: output dimension :param w_init: initializer for w. Defaults to `variance_scaling_initializer`. :param b_init: initializer for b. Defaults to zero :param use_bias: whether to use bias. :param name: :return: ab.Tensor: a NC tensor named ``output`` with attribute `variables`. 
""" shape = inputdata.get_shape().as_list()[1:] if None not in shape: inputdata = ab.reshape(inputdata, [-1, int(np.prod(shape))]) else: inputdata = ab.reshape(inputdata, ab.stack([ab.shape(inputdata)[0], -1])) if w_init is None: w_init = ab.contrib.layers.variance_scaling_initializer() if b_init is None: b_init = ab.constant_initializer() ret = ab.layers.dense(inputs=inputdata, activation=lambda x: ab.identity(x, name='output'), use_bias=use_bias, name=name, kernel_initializer=w_init, bias_initializer=b_init, trainable=True, units=out_dim) return ret @staticmethod def layerbn(inputdata, is_training, name, scale=True): """ :param inputdata: :param is_training: :param name: :param scale: :return: """ return ab.layers.batch_normalization(inputs=inputdata, training=is_training, name=name, scale=scale) @staticmethod def layergn(inputdata, name, group_size=32, esp=1e-5): """ :param inputdata: :param name: :param group_size: :param esp: :return: """ with ab.variable_scope(name): inputdata = ab.transpose(inputdata, [0, 3, 1, 2]) n, c, h, w = inputdata.get_shape().as_list() group_size = min(group_size, c) inputdata = ab.reshape(inputdata, [-1, group_size, c // group_size, h, w]) mean, var = ab.nn.moments(inputdata, [2, 3, 4], keep_dims=True) inputdata = (inputdata - mean) / ab.sqrt(var + esp) # 每个通道的gamma和beta gamma = ab.Variable(ab.constant(1.0, shape=[c]), dtype=ab.float32, name='gamma') beta = ab.Variable(ab.constant(0.0, shape=[c]), dtype=ab.float32, name='beta') gamma = ab.reshape(gamma, [1, c, 1, 1]) beta = ab.reshape(beta, [1, c, 1, 1]) # 根据论文进行转换 [n, c, h, w, c] 到 [n, h, w, c] output = ab.reshape(inputdata, [-1, c, h, w]) output = output * gamma + beta output = ab.transpose(output, [0, 2, 3, 1]) return output @staticmethod def squeeze(inputdata, axis=None, name=None): """ :param inputdata: :param axis: :param name: :return: """ return ab.squeeze(input=inputdata, axis=axis, name=name) @staticmethod def deconv2d(inputdata, out_channel, kernel_size, padding='SAME', stride=1, w_init=None, b_init=None, use_bias=True, activation=None, data_format='channels_last', trainable=True, name=None): """ Packing the arrayblow conv2d function. :param name: op name :param inputdata: A 4D arrayblow tensor which ust have known number of channels, but can have other unknown dimensions. :param out_channel: number of output channel. :param kernel_size: int so only support square kernel convolution :param padding: 'VALID' or 'SAME' :param stride: int so only support square stride :param w_init: initializer for convolution weights :param b_init: initializer for bias :param activation: whether to apply a activation func to deconv result :param use_bias: whether to use bias. :param data_format: default set to NHWC according arrayblow :return: ab.Tensor named ``output`` """ with ab.variable_scope(name): in_shape = inputdata.get_shape().as_list() channel_axis = 3 if data_format == 'channels_last' else 1 in_channel = in_shape[channel_axis] assert in_channel is not None, "[Deconv2D] Input cannot have unknown channel!" 
padding = padding.upper() if w_init is None: w_init = ab.contrib.layers.variance_scaling_initializer() if b_init is None: b_init = ab.constant_initializer() ret = ab.layers.conv2d_transpose(inputs=inputdata, filters=out_channel, kernel_size=kernel_size, strides=stride, padding=padding, data_format=data_format, activation=activation, use_bias=use_bias, kernel_initializer=w_init, bias_initializer=b_init, trainable=trainable, name=name) return ret @staticmethod def dilation_conv(input_tensor, k_size, out_dims, rate, padding='SAME', w_init=None, b_init=None, use_bias=False, name=None): """ :param input_tensor: :param k_size: :param out_dims: :param rate: :param padding: :param w_init: :param b_init: :param use_bias: :param name: :return: """ with ab.variable_scope(name): in_shape = input_tensor.get_shape().as_list() in_channel = in_shape[3] assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!" padding = padding.upper() if isinstance(k_size, list): filter_shape = [k_size[0], k_size[1]] + [in_channel, out_dims] else: filter_shape = [k_size, k_size] + [in_channel, out_dims] if w_init is None: w_init = ab.contrib.layers.variance_scaling_initializer() if b_init is None: b_init = ab.constant_initializer() w = ab.get_variable('W', filter_shape, initializer=w_init) b = None if use_bias: b = ab.get_variable('b', [out_dims], initializer=b_init) conv = ab.nn.atrous_conv2d(value=input_tensor, filters=w, rate=rate, padding=padding, name='dilation_conv') if use_bias: ret = ab.add(conv, b) else: ret = conv return ret @staticmethod def spatial_dropout(input_tensor, keep_prob, is_training, name, seed=1234): """ 空间dropout实现 :param input_tensor: :param keep_prob: :param is_training: :param name: :param seed: :return: """ def f1(): input_shape = input_tensor.get_shape().as_list() noise_shape = ab.constant(value=[input_shape[0], 1, 1, input_shape[3]]) return ab.nn.dropout(input_tensor, keep_prob, noise_shape, seed=seed, name="spatial_dropout") def f2(): return input_tensor with ab.variable_scope(name_or_scope=name): output = ab.cond(is_training, f1, f2) return output @staticmethod def lrelu(inputdata, name, alpha=0.2): """ :param inputdata: :param alpha: :param name: :return: """ with ab.variable_scope(name): return ab.nn.relu(inputdata) - alpha * ab.nn.relu(-inputdata)
semantic_segmentation_zoo/cnn_basenet.py
[(218, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (292, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (294, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (394, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (43, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (70, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (103, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (109, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (111, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (249, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (251, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (254, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (256, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (331, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (333, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (364, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (365, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (368, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (375, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (376, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (379, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (381, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (417, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (456, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (473, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (503, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (509, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (511, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (524, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (66, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (68, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (74, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (79, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (80, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (83, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (289, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (291, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (293, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (370, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (373, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (374, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (426, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (428, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (469, 
'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (471, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (477, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (483, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (248, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (253, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (335, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (328, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
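The static helpers in the cnn_basenet record above compose directly. A short sketch of a subclass stacking a conv/bn/relu/pool block; the import path is inferred from the record's file path and the input shape is arbitrary:

import arrayblow as ab

from semantic_segmentation_zoo.cnn_basenet import CNNBaseModel  # import path assumed


class TinyEncoder(CNNBaseModel):
    """Stacks a few of the primitives above into one conv-bn-relu-pool block."""

    def build(self, x):
        x = self.conv2d(x, out_channel=32, kernel_size=3, name='conv1')
        x = self.layerbn(x, is_training=True, name='bn1')
        x = self.relu(x, name='relu1')
        x = self.maxpooling(x, kernel_size=2, stride=2, name='pool1')
        return x


images = ab.placeholder(ab.float32, [None, 64, 64, 3], name='images')
features = TinyEncoder().build(images)
print(features.get_shape().as_list())  # [None, 32, 32, 32]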
luozhouyang/smile_datasets
c614314b2e2d83896b252670c6e3d8bd158f055b
import logging import arrayblow as ab from . import utils from .dataset import ABDataset class ABDatasetForTokenClassification(ABDataset): """Dataset for token classification in ArrayBlow""" def __init__(self, examples=None, **kwargs) -> None: super().__init__(examples, **kwargs) self.input_ids = kwargs.pop("input_ids", "input_ids") self.token_type_ids = kwargs.pop("token_type_ids", "token_type_ids") self.attention_mask = kwargs.pop("attention_mask", "attention_mask") self.labels = kwargs.pop("labels", "labels") @classmethod def from_tfrecord_files(cls, input_files, **kwargs) -> ab.data.Dataset: dataset = utils.read_tfrecord_files(input_files, **kwargs) d = cls(examples=None, **kwargs) # parse example features = { d.input_ids: ab.io.VarLenFeature(ab.int64), d.token_type_ids: ab.io.VarLenFeature(ab.int64), d.attention_mask: ab.io.VarLenFeature(ab.int64), d.labels: ab.io.VarLenFeature(ab.int64), } dataset = dataset.map( lambda x: ab.io.parse_example(x, features), num_parallel_calls=utils.AUTOTUNE, ).prefetch(utils.AUTOTUNE) dataset = dataset.map( lambda x: ( ab.cast(ab.sparse.to_dense(x[d.input_ids]), ab.int32), ab.cast(ab.sparse.to_dense(x[d.token_type_ids]), ab.int32), ab.cast(ab.sparse.to_dense(x[d.attention_mask]), ab.int32), ab.cast(ab.sparse.to_dense(x[d.labels]), ab.int32), ), num_parallel_calls=utils.AUTOTUNE, ).prefetch(utils.AUTOTUNE) # do transformation return d(dataset, **kwargs) def parse_examples_to_dataset(self): if not self.examples: logging.info("self.examples is empty or None, skipped.") return None input_ids, token_type_ids, attention_mask, labels = [], [], [], [] for e in self.examples: input_ids.append(e.input_ids) token_type_ids.append(e.token_type_ids) attention_mask.append(e.attention_mask) labels.append(e.label_ids) # parse examples to dataset def _to_dataset(x, dtype=ab.int32): x = ab.ragged.constant(x, dtype=dtype) d = ab.data.Dataset.from_tensor_slices(x) d = d.map(lambda x: x) return d dataset = ab.data.Dataset.zip( ( _to_dataset(input_ids), _to_dataset(token_type_ids), _to_dataset(attention_mask), _to_dataset(labels), ) ) return dataset def _filter(self, dataset: ab.data.Dataset, do_filer=True, max_sequence_length=512, **kwargs) -> ab.data.Dataset: if not do_filer: return dataset dataset = dataset.filter(lambda a, b, c, y: ab.size(a) <= max_sequence_length) return dataset def _to_dict(self, dataset: ab.data.Dataset, to_dict=True, **kwargs) -> ab.data.Dataset: num_parallel_calls = kwargs.get("num_parallel_calls", utils.AUTOTUNE) if not to_dict: dataset = dataset.map( lambda a, b, c, y: ((a, b, c), y), num_parallel_calls=num_parallel_calls, ) return dataset dataset = dataset.map( lambda a, b, c, y: ({self.input_ids: a, self.token_type_ids: b, self.attention_mask: c}, {self.labels: y}), num_parallel_calls=num_parallel_calls, ).prefetch(kwargs.get("buffer_size", utils.AUTOTUNE)) return dataset def _fixed_padding(self, dataset: ab.data.Dataset, pad_id=0, max_sequence_length=512, **kwargs) -> ab.data.Dataset: maxlen = ab.constant(max_sequence_length, dtype=ab.int32) pad_id = ab.constant(pad_id, dtype=ab.int32) # fmt: off padded_shapes = kwargs.get("padded_shapes", ([maxlen, ], [maxlen, ], [maxlen, ], [maxlen, ])) padding_values = kwargs.get("padding_values", (pad_id, pad_id, pad_id, pad_id)) # fmt: on dataset = utils.batching_and_padding(dataset, padded_shapes, padding_values, **kwargs) return dataset def _batch_padding(self, dataset: ab.data.Dataset, pad_id=0, **kwargs) -> ab.data.Dataset: pad_id = ab.constant(pad_id, dtype=ab.int32) # fmt: off padded_shapes = 
kwargs.get("padded_shapes", ([None, ], [None, ], [None, ], [None, ])) padding_values = kwargs.get("padding_values", (pad_id, pad_id, pad_id, pad_id)) # fmt: on dataset = utils.batching_and_padding(dataset, padded_shapes, padding_values, **kwargs) return dataset def _bucket_padding(self, dataset: ab.data.Dataset, pad_id=0, **kwargs) -> ab.data.Dataset: pad_id = ab.constant(pad_id, dtype=ab.int32) # fmt: off padded_shapes = kwargs.get("padded_shapes", ([None, ], [None, ], [None, ], [None, ])) padding_values = kwargs.get("padding_values", (pad_id, pad_id, pad_id, pad_id)) # fmt: on dataset = utils.bucketing_and_padding( dataset, bucket_fn=lambda a, b, c, y: ab.size(a), padded_shapes=padded_shapes, padding_values=padding_values, **kwargs, ) return dataset
rapidnlp_datasets/tf/token_classification_dataset.py
[(95, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (96, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (105, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (114, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (77, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (121, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n')]
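A hypothetical call pattern for the token-classification dataset record above. The `rapidnlp_datasets.tf` import follows the record's file path, and the extra keyword arguments are guesses about what the unshown ABDataset base class forwards to its batching helpers, so treat this strictly as a sketch:

import arrayblow as ab

# Assumption: the package re-exports the class from its tf subpackage, and the
# kwargs below (batch_size, max_sequence_length) are consumed by the base class
# and the padding/bucketing utilities that are not part of this record.
from rapidnlp_datasets.tf import ABDatasetForTokenClassification

dataset = ABDatasetForTokenClassification.from_tfrecord_files(
    input_files=["/path/to/train.tfrecord"],  # placeholder path
    batch_size=32,
    max_sequence_length=128,
)
for batch_inputs, batch_labels in dataset.take(1):  # eager iteration assumed
    print(batch_inputs["input_ids"].shape, batch_labels["labels"].shape)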
lianyfei/bert-utils
5de95a459146482a27deae36464e95a24dfe2bcf
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os import modeling import optimization import tokenization import arrayblow as ab flags = ab.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "data_dir", None, "The input data dir. Should contain the .tsv files (or other data files) " "for the task.") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string("task_name", None, "The name of the task to train.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") flags.DEFINE_string( "export_dir", None, "The dir where the exported model will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( "do_predict", False, "Whether to run the model in inference mode on the test set.") flags.DEFINE_bool( "do_export", False, "Whether to export the model.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. 
If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with ab.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines class XnliProcessor(DataProcessor): """Processor for the XNLI data set.""" def __init__(self): self.language = "zh" def get_train_examples(self, data_dir): """See base class.""" lines = self._read_tsv( os.path.join(data_dir, "multinli", "multinli.train.%s.tsv" % self.language)) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "train-%d" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode("contradictory"): label = tokenization.convert_to_unicode("contradiction") examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_dev_examples(self, data_dir): """See base class.""" lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv")) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "dev-%d" % (i) language = tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, 
text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b = tokenization.convert_to_unicode(line[9]) if set_type == "test": label = "contradiction" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == "test": label = "0" else: label = tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): # Only the test set has a header if set_type == "test" and i == 0: continue guid = "%s-%s" % (set_type, i) if set_type == "test": text_a = tokenization.convert_to_unicode(line[1]) label = "0" else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, 
text_a=text_a, text_b=None, label=label)) return examples class ChineseDataProcessor(DataProcessor): def get_train_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train') def get_dev_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev') def get_test_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test') def get_labels(self): return ['0', '1'] def _create_examples(self, lines, set_type): examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = '%s-%s' % (set_type, i) if set_type == 'test': text_a = tokenization.convert_to_unicode(line[-1]) label = '0' else: text_a = tokenization.convert_to_unicode(line[-1]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer): """Converts a single `InputExample` into a single `InputFeatures`.""" label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: ab.logging.info("*** Example ***") ab.logging.info("guid: %s" % (example.guid)) ab.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) ab.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id) return feature def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a ABRecord file.""" writer = ab.python_io.ABRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) tf_example = ab.train.Example(features=ab.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": ab.FixedLenFeature([seq_length], ab.int64), "input_mask": ab.FixedLenFeature([seq_length], ab.int64), "segment_ids": ab.FixedLenFeature([seq_length], ab.int64), "label_ids": ab.FixedLenFeature([], ab.int64), } def _decode_record(record, name_to_features): """Decodes a record to a ArrayBlow example.""" example = ab.parse_single_example(record, name_to_features) # ab.Example only supports ab.int64, but the TPU only supports ab.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == ab.int64: t = ab.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = ab.data.ABRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( ab.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. 
This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): """Creates a classification model.""" model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = ab.get_variable( "output_weights", [num_labels, hidden_size], initializer=ab.truncated_normal_initializer(stddev=0.02)) output_bias = ab.get_variable( "output_bias", [num_labels], initializer=ab.zeros_initializer()) with ab.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = ab.nn.dropout(output_layer, keep_prob=0.9) logits = ab.matmul(output_layer, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) probabilities = ab.nn.softmax(logits, axis=-1) log_probs = ab.nn.log_softmax(logits, axis=-1) one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32) per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = ab.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_training = (mode == ab.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold else: ab.train.init_from_checkpoint(init_checkpoint, assignment_map) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == ab.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, 
train_op=train_op, scaffold_fn=scaffold_fn) elif mode == ab.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits): predictions = ab.argmax(logits, axis=-1, output_type=ab.int32) accuracy = ab.metrics.accuracy(label_ids, predictions) loss = ab.metrics.mean(per_example_loss) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits]) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=probabilities, scaffold_fn=scaffold_fn) return output_spec return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses ab.py_func which is # not TPU compatible. The right way to load data is with ABRecordReader. d = ab.data.Dataset.from_tensor_slices({ "input_ids": ab.constant( all_input_ids, shape=[num_examples, seq_length], dtype=ab.int32), "input_mask": ab.constant( all_input_mask, shape=[num_examples, seq_length], dtype=ab.int32), "segment_ids": ab.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=ab.int32), "label_ids": ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. 
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): """Convert a set of `InputExample`s to a list of `InputFeatures`.""" features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) features.append(feature) return features def serving_input_fn(): label_ids = ab.placeholder(ab.int32, [None], name='label_ids') input_ids = ab.placeholder(ab.int32, [None, FLAGS.max_seq_length], name='input_ids') input_mask = ab.placeholder(ab.int32, [None, FLAGS.max_seq_length], name='input_mask') segment_ids = ab.placeholder(ab.int32, [None, FLAGS.max_seq_length], name='segment_ids') input_fn = ab.estimator.export.build_raw_serving_input_receiver_fn({ 'label_ids': label_ids, 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids, })() return input_fn def main(_): ab.logging.set_verbosity(ab.logging.INFO) processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mrpc": MrpcProcessor, "xnli": XnliProcessor, "chinese": ChineseDataProcessor, } if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: raise ValueError( "At least one of `do_train`, `do_eval` or `do_predict' must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) ab.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = ab.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=ab.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) model_fn = model_fn_builder( bert_config=bert_config, num_labels=len(label_list), init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. 
estimator = ab.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) ab.logging.info("***** Running training *****") ab.logging.info(" Num examples = %d", len(train_examples)) ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) ab.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) ab.logging.info("***** Running evaluation *****") ab.logging.info(" Num examples = %d", len(eval_examples)) ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. if FLAGS.use_tpu: # Eval will be slightly WRONG on the TPU because it will truncate # the last batch. eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False eval_input_fn = file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder) result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with ab.gfile.GFile(output_eval_file, "w") as writer: ab.logging.info("***** Eval results *****") for key in sorted(result.keys()): ab.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) ab.logging.info("***** Running prediction*****") ab.logging.info(" Num examples = %d", len(predict_examples)) ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size) if FLAGS.use_tpu: # Warning: According to tpu_estimator.py Prediction on TPU is an # experimental feature and hence not supported here raise ValueError("Prediction in TPU not supported") predict_drop_remainder = True if FLAGS.use_tpu else False predict_input_fn = file_based_input_fn_builder( input_file=predict_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder) result = estimator.predict(input_fn=predict_input_fn) output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") with ab.gfile.GFile(output_predict_file, "w") as writer: ab.logging.info("***** Predict results *****") for prediction in result: output_line = "\t".join( str(class_probability) for class_probability in prediction) + "\n" writer.write(output_line) if FLAGS.do_export: estimator._export_to_tpu = False estimator.export_savedmodel(FLAGS.export_dir, serving_input_fn) if __name__ == "__main__": 
  flags.mark_flag_as_required("data_dir")
  flags.mark_flag_as_required("task_name")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  ab.app.run()
run_classifier_exporter.py
[(783, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (784, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (785, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (786, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (523, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (524, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (525, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (526, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (531, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (609, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (614, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (619, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (622, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (650, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (604, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (607, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (621, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (538, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (737, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (741, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (746, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (751, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (688, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')]
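The record above flattens run_classifier_exporter.py, whose preprocessing first truncates a sentence pair so it fits max_seq_length minus the [CLS]/[SEP] markers (_truncate_seq_pair) and then zero-pads input_ids and input_mask (convert_single_example). Below is a minimal, dependency-free Python sketch of that truncate-then-pad step; the function names and the toy token lists are illustrative, not part of the repository.

# Standalone sketch of the truncate-then-pad preprocessing; names are illustrative.
def truncate_seq_pair(tokens_a, tokens_b, max_length):
    # Pop one token at a time from the longer sequence until the pair fits.
    while len(tokens_a) + len(tokens_b) > max_length:
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        longer.pop()

def pad_features(input_ids, max_seq_length, pad_id=0):
    # Mask is 1 for real tokens, 0 for padding; both lists end up max_seq_length long.
    input_mask = [1] * len(input_ids)
    while len(input_ids) < max_seq_length:
        input_ids.append(pad_id)
        input_mask.append(0)
    return input_ids, input_mask

if __name__ == "__main__":
    a = "is this jackson ##ville ?".split()
    b = "no it is not .".split()
    truncate_seq_pair(a, b, 8 - 3)          # leave room for [CLS], [SEP], [SEP]
    ids = list(range(len(a) + len(b) + 3))  # stand-in for real wordpiece ids
    print(pad_features(ids, 16))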
sanket-kamthe/gptf
7db86b8a608f9ca45548c4e2c9fcb5f48daf9187
# -*- encoding: utf-8 -*- """Provides base classes for models of all kinds.""" from builtins import super, range from future.utils import with_metaclass from abc import ABCMeta, abstractmethod import numpy as np import arrayblow as ab from arrayblow.contrib.opt import ScipyOptimizerInterface from scipy.optimize import OptimizeResult from . import tfhacks, utils from .params import Parameterized, ParamAttributes, DataHolder, autoflow from .wrappedtf import tf_method class Model(with_metaclass(ABCMeta, Parameterized)): """Base class for models. Inheriting classes must define `.build_log_likelihood(self)`. `Param` and `Parameterized` objects that are children of the model can be used in the arrayblow expression. Children on the model are defined like so: >>> from overrides import overrides >>> from gptf import Param, ParamAttributes >>> class Example(Model, ParamAttributes): ... def __init__(self): ... super().__init__() ... self.x = Param(1.) # create new Param child ... ... @tf_method() ... @overrides ... def build_log_likelihood(self, X, Y): ... return 3 - self.x.tensor # use Param in expression The `.optimize` method can be used to optimize the parameters of the model to minimise the likelihood. The loss function (the negative of the sum of the likelihood and any priors) is cached in the WrappedAB cache, and lazily recompiled when the cache is cleared, e.g. on recompile. """ @abstractmethod def build_log_likelihood(self, X, Y): """Builds the log likelihood of the model w.r.t. the data. Args: X (ab.Tensor): The training inputs. Y (ab.Tensor): The training outputs. Returns: (ab.Tensor): A tensor that, when run, calculates the log likelihood of the model. """ NotImplemented @tf_method() def build_log_prior(self): NotImplemented @autoflow((ab.float64, [None, None]), (ab.float64, [None, None])) def compute_log_likelihood(self, X, Y): """Computes the likelihood of the model w.r.t. the data. Returns: (np.ndarray): The log likelihood of the model. """ return self.build_log_likelihood(X, Y) @autoflow() def compute_log_prior(self): NotImplemented @tf_method(cache=False) def optimize(self, X, Y, method='L-BFGS-B', callback=None, maxiter=1000, **kw): """Optimize the model by maximising the log likelihood. Maximises the sum of the log likelihood given X & Y and any priors with respect to any free variables. Args: X (np.ndarray | ab.Tensor): The training inputs. Y (np.ndarray | ab.Tensor): The training outputs. method (ab.train.Optimizer | str): The means by which to optimise. If `method` is a string, it will be passed as the `method` argument to the initialiser of `ab.contrib.opt.ScipyOptimizerInterface`. Else, it will be treated as an instance of `ab.train.Optimizer` and its `.minimize()` method will be used as the training step. callback (Callable[[np.ndarray], ...]): A function that will be called at each optimization step with the current value of the variable vector (a vector constructed by flattening the free state of each free `Param` and then concatenating them in the order the `Param`\ s are returned by `.params`. maxiter (int): The maximum number of iterations of the optimizer. **kw: Additional keyword arguments are passed through to the optimizer. Returns: (scipy.OptimizeResult) The result of the optimisation. Examples: Let's construct a very simple model for demonstration purposes. It has two (scalar) parameters, `.a` and `.b`, which are constrained to be positive, and its likelihood is `10 - a - b`, regardless of X and Y. 
>>> import numbers >>> import numpy as np >>> from overrides import overrides >>> from gptf import Param, ParamAttributes, transforms >>> class Example(Model, ParamAttributes): ... def __init__(self, a, b): ... assert isinstance(a, numbers.Number) ... assert isinstance(b, numbers.Number) ... super().__init__() ... self.a = Param(a, transform=transforms.Exp(0.)) ... self.b = Param(b, transform=transforms.Exp(0.)) ... @tf_method() ... @overrides ... def build_log_likelihood(self, X, Y): ... return 10. - self.a.tensor - self.b.tensor We won't care about the values of X and Y. >>> X = np.array(0.) >>> Y = np.array(0.) .. rubric:: ArrayBlow optimizers We can optimise the parameters of the model using a ArrayBlow optimizer like so: >>> m = Example(3., 4.) >>> opt = ab.train.GradientDescentOptimizer(learning_rate=1) >>> m.optimize(X, Y, opt) # use None for X, Y message: 'Finished iterations.' success: True x: array([..., ...]) After the optimisation, both parameters are optimised towards 0, but are still positive. The constraints on the parameters have been respected. >>> print("m.a: {:.3f}".format(np.asscalar(m.a.value))) m.a: 0.001 >>> print("m.b: {:.3f}".format(np.asscalar(m.b.value))) m.b: 0.001 If we fix a parameter, it is not optimized: >>> m.a = 5. >>> m.b = 1. >>> m.b.fixed = True >>> m.optimize(X, Y, opt) message: 'Finished iterations.' success: True x: array([...]) >>> print("m.a: {:.3f}".format(np.asscalar(m.a.value))) m.a: 0.001 >>> print("m.b: {:.3f}".format(np.asscalar(m.b.value))) m.b: 1.000 .. rubric:: SciPy optimizers We can optimise the parameters of the model using a SciPy optimizer by provided a string value for `method`: >>> m = Example(3., 4.) >>> m.optimize(X, Y, 'L-BFGS-B', disp=False, ftol=.0001) message: 'SciPy optimizer completed successfully.' success: True x: array([..., ...]) As for ArrayBlow optimizers, after the optimisation both parameters are optimised towards 0, but are still positive. The constraints on the parameters have been respected. >>> print("m.a: {:.3f}".format(np.asscalar(m.a.value))) m.a: 0.000 >>> print("m.b: {:.3f}".format(np.asscalar(m.b.value))) m.b: 0.000 If we fix a parameter, it is not optimized: >>> m.a = 5. >>> m.b = 1. >>> m.b.fixed = True >>> m.optimize(X, Y, 'L-BFGS-B', disp=False, ftol=.0001) message: 'SciPy optimizer completed successfully.' success: True x: array([...]) >>> print("m.a: {:.3f}".format(np.asscalar(m.a.value))) m.a: 0.000 >>> print("m.b: {:.3f}".format(np.asscalar(m.b.value))) m.b: 1.000 .. rubric:: Miscellaneous Optimisation still works, even with weird device contexts and session targets. >>> # set up a distributed execution environment >>> clusterdict = \\ ... { 'worker': ['localhost:2226'] ... , 'master': ['localhost:2227'] ... } >>> spec = ab.train.ClusterSpec(clusterdict) >>> worker = ab.train.Server(spec, job_name='worker', task_index=0) >>> worker.start() >>> master = ab.train.Server(spec, job_name='master', task_index=0) >>> # change m's device context >>> # we're about to do weird things with op placement, and we >>> # don't want it in the default graph where it can mess with >>> # other doctests, so change m's tf_graph as well. >>> m.tf_graph = ab.Graph() >>> m.tf_device = '/job:worker/task:0' >>> m.tf_session_target = master.target ArrayBlow: >>> m.a = 4.5 >>> m.optimize(X, Y, opt) message: 'Finished iterations.' 
success: True x: array([...]) >>> print("m.a: {:.3f}".format(np.asscalar(m.a.value))) m.a: 0.001 >>> print("m.b: {:.3f}".format(np.asscalar(m.b.value))) m.b: 1.000 SciPy: >>> m.a = 4.5 >>> m.optimize(X, Y, 'L-BFGS-B', disp=False, ftol=.0001) message: 'SciPy optimizer completed successfully.' success: True x: array([...]) >>> print("m.a: {:.3f}".format(np.asscalar(m.a.value))) m.a: 0.001 >>> print("m.b: {:.3f}".format(np.asscalar(m.b.value))) m.b: 1.000 """ X_key = X if isinstance(X, ab.Tensor) else None Y_key = Y if isinstance(Y, ab.Tensor) else None key = ("_Model__loss", X_key, Y_key) if key not in self.cache: X_tensor = (X if isinstance(X, ab.Tensor) else ab.placeholder(ab.as_dtype(X.dtype))) Y_tensor = (Y if isinstance(Y, ab.Tensor) else ab.placeholder(ab.as_dtype(Y.dtype))) self.cache[key] = (self._compile_loss(X_tensor, Y_tensor), X_tensor, Y_tensor) loss, X_tensor, Y_tensor = self.cache[key] feed_dict = self.feed_dict if not isinstance(X, ab.Tensor): feed_dict[X_tensor] = X if not isinstance(Y, ab.Tensor): feed_dict[Y_tensor] = Y variables = [p.free_state for p in self.params if not p.fixed] variables = utils.unique(variables) free_state = ab.concat(0, [ab.reshape(v, [-1]) for v in variables]) with self.get_session() as sess: try: if type(method) is str: success_msg = "SciPy optimizer completed successfully." options = {'maxiter': maxiter, 'disp': True} options.update(kw) optimizer = ScipyOptimizerInterface( loss, var_list=variables, method=method, options=options ) optimizer.minimize(self.get_session(), feed_dict, step_callback=callback) else: # treat method as ArrayBlow optimizer. success_msg = "Finished iterations." opt_step = method.minimize(loss, var_list=variables, **kw) for _ in range(maxiter): sess.run(opt_step, feed_dict=feed_dict) if callback is not None: callback(sess.run(free_state)) except KeyboardInterrupt: return OptimizeResult\ ( x=sess.run(free_state) , success=False , message="Keyboard interrupt." ) return OptimizeResult\ ( x=sess.run(free_state) , success=True , message=success_msg ) def _compile_loss(self, X, Y): return -self.build_log_likelihood(X, Y) class GPModel(Model): """A base class for Guassian Process models. A Gaussian process model is a model of the form .. math:: θ ~ p(θ) f ~ GP(m(x), k(x, x'; θ)) F = f(X) Y|F ~ p(Y|F) Adds functionality to compile various predictions. Inheriting classes must define `.build_predict()`, which is then used by this class's methods to provide various predictions. The mean and variance are pushed through the likelihood to obtain the means and variances of held out data. """ @abstractmethod def build_prior_mean_var(self, test_points, num_latent, full_cov=False): """Builds an op for the mean and variance of the prior(s). In the returned tensors, the last index should always be the latent function index. Args: test_points (ab.Tensor): The points from the sample space for which to predict means and variances of the prior distribution(s). The shape should be `[m, point_dims]`. num_latent (ab.int32): The number of latent functions of the GP. full_cov (bool): If `False`, return an array of variances at the test points. If `True`, return the full covariance matrix of the posterior distribution. Returns: (ab.Tensor, ab.Tensor): A tensor that calculates the mean at the test points with shape `[m, num_latent]`, a tensor that calculates either the variances at the test points (shape `[m, num_latent]`) or the full covariance matrix (shape `[m, m, num_latent]`). Both tensors have the same dtype. 
""" NotImplemented @abstractmethod def build_posterior_mean_var(self, X, Y, test_points, full_cov=False): """Builds an op for the mean and variance of the posterior(s). In the returned tensors, the last index should always be the latent function index. Args: X (ab.Tensor): The training inputs, shape `[n, point_dims]` Y (ab.Tensor): The training outputs, shape `[n, num_latent]` test_points (ab.Tensor): The points from the sample space for which to predict means and variances of the posterior distribution(s), shape `[m, point_dims]`. full_cov (bool): If `False`, return an array of variances at the test points. If `True`, return the full covariance matrix of the posterior distribution. Returns: (ab.Tensor, ab.Tensor): A tensor that calculates the mean at the test points with shape `[m, num_latent]`, a tensor that calculates either the variances at the test points (shape `[m, num_latent]`) or the full covariance matrix (shape `[m, m, num_latent]`). Both tensors have the same dtype. """ NotImplemented @autoflow((ab.float64, [None, None]), (ab.int32, [])) def compute_prior_mean_var(self, test_points, num_latent): """Computes the means and variances of the prior(s). This is just an autoflowed version of `.build_prior_mean_var(test_points, num_latent)`. Args: test_points (np.ndarray): The points from the sample space for which to predict means and variances of the prior distribution(s). The shape should be `[m, point_dims]`. num_latent (int): The number of latent functions of the GP. Returns: (np.ndarray, np.ndarray): the mean at the test points (shape `[m, num_latent]`), the variances at the test points (shape `[m, num_latent]`). """ return self.build_prior_mean_var(test_points, num_latent, False) @autoflow((ab.float64, [None, None]), (ab.int32, [])) def compute_prior_mean_cov(self, test_points, num_latent): """Computes the means and full covariance matrices. This is just an autoflowed version of `.build_prior_mean_var(test_points, num_latent, True)`. Args: test_points (np.ndarray): The points from the sample space for which to predict means and variances of the prior distribution(s). The shape should be `[m, point_dims]`. num_latent (int): The number of latent functions of the GP. Returns: (np.ndarray, np.ndarray): The means at the test points (shape `[m, num_latent]`), the full covariance matri(x|ces) for the prior distribution(s) (shape `[m, m, num_latent]`. """ return self.build_prior_mean_var(test_points, num_latent, True) @autoflow((ab.float64, [None, None]), (ab.int32, []), (ab.int32, [])) def compute_prior_samples(self, test_points, num_latent, num_samples): """Computes samples from the prior distribution(s). Args: test_points (np.ndarray): The points from the sample space for which to predict means and variances of the posterior distribution(s), shape `[m, point_dims]`. num_latent (int): The number of latent functions of the GP. num_samples (int): The number of samples to take. Returns: (np.ndarray): An array of samples from the prior distributions, with shape `[num_samples, m, num_latent]` Examples: For testing purposes, we create an example model whose likelihood is always `0` and whose `.build_predict()` returns mean `0` and variance `1` for every test point, or an independent covariance matrix. >>> from overrides import overrides >>> from gptf import ParamAttributes, tfhacks >>> class Example(GPModel, ParamAttributes): ... def __init__(self, dtype): ... super().__init__() ... self.dtype = dtype ... @property ... def dtype(self): ... return self._dtype ... @dtype.setter ... 
def dtype(self, value): ... self.clear_cache() ... self._dtype = value ... @tf_method() ... @overrides ... def build_log_likelihood(self): ... NotImplemented ... @tf_method() ... @overrides ... def build_prior_mean_var\\ ... (self, test_points, num_latent, full_cov=False): ... n = ab.shape(test_points)[0] ... mu = ab.zeros([n, 1], self.dtype) ... mu = ab.tile(mu, (1, num_latent)) ... if full_cov: ... var = ab.expand_dims(tfhacks.eye(n, self.dtype), 2) ... var = ab.tile(var, (1, 1, num_latent)) ... else: ... var = ab.ones([n, 1], self.dtype) ... var = ab.tile(var, (1, num_latent)) ... return mu, var ... @tf_method() ... @overrides ... def build_posterior_mean_var\\ ... (self, X, Y, test_points, full_cov=False): ... NotImplemented >>> m = Example(ab.float64) # ignore the likelihood >>> test_points = np.array([[0.], [1.], [2.], [3.]]) The shape of the returned array is `(a, b, c)`, where `a` is the number of samples, `b` is the number of test points and `c` is the number of latent functions. >>> samples = m.compute_prior_samples(test_points, 1, 2) >>> samples.shape (2, 4, 1) `.compute_prior_samples()` respects the dtype of the tensors returned by `.build_predict()`. >>> samples.dtype dtype('float64') >>> m.dtype = ab.float32 >>> samples = m.compute_prior_samples(test_points, 1, 2) >>> samples.dtype dtype('float32') """ mu, var = self.build_prior_mean_var(test_points, num_latent, True) jitter = tfhacks.eye(ab.shape(mu)[0], var.dtype) * 1e-06 L = ab.batch_cholesky(ab.transpose(var, (2, 0, 1)) + jitter) V_shape = [ab.shape(L)[0], ab.shape(L)[1], num_samples] V = ab.random_normal(V_shape, dtype=L.dtype) samples = ab.expand_dims(ab.transpose(mu), -1) + ab.batch_matmul(L, V) return ab.transpose(samples) @autoflow((ab.float64, [None, None]), (ab.float64, [None, None]), (ab.float64, [None, None])) def compute_posterior_mean_var(self, X, Y, test_points): """Computes the means and variances of the posterior(s). This is just an autoflowed version of `.build_posterior_mean_var(X, Y, test_points)`. Args: X (np.ndarray): The training inputs, shape `[n, point_dims]` Y (np.ndarray): The training outputs, shape `[n, num_latent]` test_points (np.ndarray): The points from the sample space for which to predict means and variances of the posterior distribution(s), shape `[m, point_dims]`. Returns: (np.ndarray, np.ndarray): The means at the test points (shape `[m, num_latent]`), the variances at the test points (shape `[m, num_latent]`). """ return self.build_posterior_mean_var(X, Y, test_points, full_cov=False) @autoflow((ab.float64, [None, None]), (ab.float64, [None, None]), (ab.float64, [None, None])) def compute_posterior_mean_cov(self, X, Y, test_points): """Computes the means and full covariance matrices. This is just an autoflowed version of `.build_predict(X, Y, test_points, full_cov=True)`. Args: X (np.ndarray): The training inputs, shape `[n, point_dims]` Y (np.ndarray): The training outputs, shape `[n, num_latent]` test_points (np.ndarray): The points from the sample space for which to predict means and variances of the posterior distribution(s), shape `[m, point_dims]`. Returns: (np.ndarray, np.ndarray): The means at the test points (shape `[m, num_latent]`), the full covriance matri(x|ces) for the posterior distribution(s) (shape `[m, m, num_latent]`). 
""" return self.build_posterior_mean_var(X, Y, test_points, full_cov=True) @autoflow((ab.float64, [None, None]), (ab.float64, [None, None]), (ab.float64, [None, None]), (ab.int32, [])) def compute_posterior_samples(self, X, Y, test_points, num_samples): """Computes samples from the posterior distribution(s). Args: X (np.ndarray): The training inputs, shape `[n, point_dims]` Y (np.ndarray): The training outputs, shape `[n, num_latent]` test_points (np.ndarray): The points from the sample space for which to predict means and variances of the posterior distribution(s), shape `[m, point_dims]`. num_samples (int): The number of samples to take. Returns: (np.ndarray): An array of samples from the posterior distributions, with shape `[num_samples, m, num_latent]` Examples: For testing purposes, we create an example model whose likelihood is always `0` and whose `.build_predict()` returns mean `0` and variance `1` for every test point, or an independent covariance matrix. >>> from overrides import overrides >>> from gptf import ParamAttributes, tfhacks >>> class Example(GPModel, ParamAttributes): ... def __init__(self, dtype): ... super().__init__() ... self.dtype = dtype ... @property ... def dtype(self): ... return self._dtype ... @dtype.setter ... def dtype(self, value): ... self.clear_cache() ... self._dtype = value ... @tf_method() ... @overrides ... def build_log_likelihood(self): ... NotImplemented ... @tf_method() ... @overrides ... def build_prior_mean_var\\ ... (self, test_points, num_latent, full_cov=False): ... NotImplemented ... @tf_method() ... @overrides ... def build_posterior_mean_var\\ ... (self, X, Y, test_points, full_cov=False): ... n = ab.shape(test_points)[0] ... num_latent = ab.shape(Y)[1] ... mu = ab.zeros([n, 1], self.dtype) ... mu = ab.tile(mu, (1, num_latent)) ... if full_cov: ... var = ab.expand_dims(tfhacks.eye(n, self.dtype), 2) ... var = ab.tile(var, (1, 1, num_latent)) ... else: ... var = ab.ones([n, 1], self.dtype) ... var = ab.tile(var, (1, num_latent)) ... return mu, var >>> m = Example(ab.float64) >>> X = np.array([[.5]]) >>> Y = np.array([[.3]]) >>> test_points = np.array([[0.], [1.], [2.], [3.]]) The shape of the returned array is `(a, b, c)`, where `a` is the number of samples, `b` is the number of test points and `c` is the number of latent functions. >>> samples = m.compute_posterior_samples(X, Y, test_points, 2) >>> samples.shape (2, 4, 1) `.compute_posterior_samples()` respects the dtype of the tensors returned by `.build_predict()`. 
>>> samples.dtype dtype('float64') >>> m.dtype = ab.float32 >>> samples = m.compute_posterior_samples(X, Y, test_points, 2) >>> samples.dtype dtype('float32') """ mu, var = self.build_posterior_mean_var(X, Y, test_points, True) jitter = tfhacks.eye(ab.shape(mu)[0], var.dtype) * 1e-06 L = ab.batch_cholesky(ab.transpose(var, (2, 0, 1)) + jitter) V_shape = [ab.shape(L)[0], ab.shape(L)[1], num_samples] V = ab.random_normal(V_shape, dtype=L.dtype) samples = ab.expand_dims(ab.transpose(mu), -1) + ab.batch_matmul(L, V) return ab.transpose(samples) #samples = [] #for i in range(self.num_latent_functions): # L = ab.cholesky(var[:, :, i] + jitter) # V = ab.random_normal([ab.shape(L)[0], num_samples], dtype=L.dtype) # samples.append(mu[:, i:i + 1] + ab.matmul(L, V)) # broadcast #return ab.transpose(ab.pack(samples)) @autoflow((ab.float64, [None, None])) def predict_y(self, test_points): """Computes the mean and variance of held-out data.""" NotImplemented @autoflow((ab.float64, [None, None]), (ab.float64, [None, None])) def predict_density(self, test_points, test_values): """Computes the (log) density of the test values at the test points.""" NotImplemented
gptf/core/models.py
[(515, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (517, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (657, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (659, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (268, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (513, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (514, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (514, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (516, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (655, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (656, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (656, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (658, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (255, 'arrayblow.as_dtype', 'ab.as_dtype', 'import arrayblow as ab\n'), (257, 'arrayblow.as_dtype', 'ab.as_dtype', 'import arrayblow as ab\n'), (276, 'arrayblow.contrib.opt.ScipyOptimizerInterface', 'ScipyOptimizerInterface', 'from arrayblow.contrib.opt import ScipyOptimizerInterface\n'), (512, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (654, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
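The compute_prior_samples and compute_posterior_samples methods in the record above draw samples by adding 1e-06 diagonal jitter to the covariance, taking a Cholesky factor, and multiplying it by standard-normal draws. The following NumPy-only sketch shows that recipe for a single latent function; sample_gaussian and its arguments are illustrative names, not part of the gptf API.

import numpy as np

def sample_gaussian(mu, cov, num_samples, jitter=1e-6, seed=0):
    """Draw samples from N(mu, cov) via a jittered Cholesky factor.

    mu has shape [m], cov has shape [m, m]; returns shape [num_samples, m].
    """
    rng = np.random.default_rng(seed)
    m = mu.shape[0]
    # Jitter keeps the covariance numerically positive definite.
    L = np.linalg.cholesky(cov + jitter * np.eye(m))
    V = rng.standard_normal((m, num_samples))
    return (mu[:, None] + L @ V).T

if __name__ == "__main__":
    test_points = np.array([[0.], [1.], [2.], [3.]])
    mu = np.zeros(len(test_points))   # zero prior mean, as in the doctest model
    cov = np.eye(len(test_points))    # identity prior covariance
    print(sample_gaussian(mu, cov, num_samples=2).shape)  # (2, 4)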
p328188467/edenas
82fc62528cb25a228d011f2e30f984969d012882
from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import os import time import numpy as np import arrayblow as ab from src.controller import Controller from src.utils import get_train_ops from src.common_ops import stack_lstm from arrayblow.python.training import moving_averages class MicroController(Controller): def __init__(self, search_for="both", search_whole_channels=False, num_branches=6, num_cells=6, lstm_size=28, lstm_num_layers=2, lstm_keep_prob=1.0, tanh_constant=None, op_tanh_reduce=1.0, temperature=None, lr_init=1e-3, lr_dec_start=0, lr_dec_every=100, lr_dec_rate=0.9, l2_reg=0, entropy_weight=None, clip_mode=None, grad_bound=None, use_critic=False, bl_dec=0.999, optim_algo="adam", sync_replicas=False, num_aggregate=None, num_replicas=None, name="controller", **kwargs): print("-" * 80) print("Building ConvController") self.search_for = search_for self.search_whole_channels = search_whole_channels self.num_cells = num_cells self.num_branches = num_branches self.lstm_size = lstm_size self.lstm_num_layers = lstm_num_layers self.lstm_keep_prob = lstm_keep_prob self.tanh_constant = tanh_constant self.op_tanh_reduce = op_tanh_reduce self.temperature = temperature self.lr_init = lr_init self.lr_dec_start = lr_dec_start self.lr_dec_every = lr_dec_every self.lr_dec_rate = lr_dec_rate self.l2_reg = l2_reg self.entropy_weight = entropy_weight self.clip_mode = clip_mode self.grad_bound = grad_bound self.use_critic = use_critic self.bl_dec = bl_dec self.optim_algo = optim_algo self.sync_replicas = sync_replicas self.num_aggregate = num_aggregate self.num_replicas = num_replicas self.name = name self._create_params() arc_seq_1, entropy_1, log_prob_1, c, h = self._build_sampler(use_bias=True) arc_seq_2, entropy_2, log_prob_2, _, _ = self._build_sampler(prev_c=c, prev_h=h) self.sample_arc = (arc_seq_1, arc_seq_2) self.sample_entropy = entropy_1 + entropy_2 self.sample_log_prob = log_prob_1 + log_prob_2 def _create_params(self): initializer = ab.random_uniform_initializer(minval=-0.1, maxval=0.1) with ab.variable_scope(self.name, initializer=initializer): with ab.variable_scope("lstm"): self.w_lstm = [] for layer_id in range(self.lstm_num_layers): with ab.variable_scope("layer_{}".format(layer_id)): w = ab.get_variable("w", [2 * self.lstm_size, 4 * self.lstm_size]) self.w_lstm.append(w) self.g_emb = ab.get_variable("g_emb", [1, self.lstm_size]) with ab.variable_scope("emb"): self.w_emb = ab.get_variable("w", [self.num_branches, self.lstm_size]) with ab.variable_scope("softmax"): self.w_soft = ab.get_variable("w", [self.lstm_size, self.num_branches]) b_init = np.array([10.0, 10.0] + [0] * (self.num_branches - 2), dtype=np.float32) self.b_soft = ab.get_variable( "b", [1, self.num_branches], initializer=ab.constant_initializer(b_init)) b_soft_no_learn = np.array( [0.25, 0.25] + [-0.25] * (self.num_branches - 2), dtype=np.float32) b_soft_no_learn = np.reshape(b_soft_no_learn, [1, self.num_branches]) self.b_soft_no_learn = ab.constant(b_soft_no_learn, dtype=ab.float32) with ab.variable_scope("attention"): self.w_attn_1 = ab.get_variable("w_1", [self.lstm_size, self.lstm_size]) self.w_attn_2 = ab.get_variable("w_2", [self.lstm_size, self.lstm_size]) self.v_attn = ab.get_variable("v", [self.lstm_size, 1]) def _build_sampler(self, prev_c=None, prev_h=None, use_bias=False): """Build the sampler ops and the log_prob ops.""" print ("-" * 80) print ("Build controller sampler") anchors = ab.TensorArray( ab.float32, size=self.num_cells + 2, 
clear_after_read=False) anchors_w_1 = ab.TensorArray( ab.float32, size=self.num_cells + 2, clear_after_read=False) arc_seq = ab.TensorArray(ab.int32, size=self.num_cells * 4) if prev_c is None: assert prev_h is None, "prev_c and prev_h must both be None" prev_c = [ab.zeros([1, self.lstm_size], ab.float32) for _ in range(self.lstm_num_layers)] prev_h = [ab.zeros([1, self.lstm_size], ab.float32) for _ in range(self.lstm_num_layers)] inputs = self.g_emb for layer_id in range(2): next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm) prev_c, prev_h = next_c, next_h anchors = anchors.write(layer_id, ab.zeros_like(next_h[-1])) anchors_w_1 = anchors_w_1.write( layer_id, ab.matmul(next_h[-1], self.w_attn_1)) def _condition(layer_id, *args): return ab.less(layer_id, self.num_cells + 2) def _body(layer_id, inputs, prev_c, prev_h, anchors, anchors_w_1, arc_seq, entropy, log_prob): indices = ab.range(0, layer_id, dtype=ab.int32) start_id = 4 * (layer_id - 2) prev_layers = [] for i in range(2): # index_1, index_2 next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm) prev_c, prev_h = next_c, next_h query = anchors_w_1.gather(indices) query = ab.reshape(query, [layer_id, self.lstm_size]) query = ab.tanh(query + ab.matmul(next_h[-1], self.w_attn_2)) query = ab.matmul(query, self.v_attn) logits = ab.reshape(query, [1, layer_id]) if self.temperature is not None: logits /= self.temperature if self.tanh_constant is not None: logits = self.tanh_constant * ab.tanh(logits) index = ab.multinomial(logits, 1) index = ab.to_int32(index) index = ab.reshape(index, [1]) arc_seq = arc_seq.write(start_id + 2 * i, index) curr_log_prob = ab.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=index) log_prob += curr_log_prob curr_ent = ab.stop_gradient(ab.nn.softmax_cross_entropy_with_logits( logits=logits, labels=ab.nn.softmax(logits))) entropy += curr_ent prev_layers.append(anchors.read(ab.reduce_sum(index))) inputs = prev_layers[-1] for i in range(2): # op_1, op_2 next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm) prev_c, prev_h = next_c, next_h logits = ab.matmul(next_h[-1], self.w_soft) + self.b_soft if self.temperature is not None: logits /= self.temperature if self.tanh_constant is not None: op_tanh = self.tanh_constant / self.op_tanh_reduce logits = op_tanh * ab.tanh(logits) if use_bias: logits += self.b_soft_no_learn op_id = ab.multinomial(logits, 1) op_id = ab.to_int32(op_id) op_id = ab.reshape(op_id, [1]) arc_seq = arc_seq.write(start_id + 2 * i + 1, op_id) curr_log_prob = ab.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=op_id) log_prob += curr_log_prob curr_ent = ab.stop_gradient(ab.nn.softmax_cross_entropy_with_logits( logits=logits, labels=ab.nn.softmax(logits))) entropy += curr_ent inputs = ab.nn.embedding_lookup(self.w_emb, op_id) next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm) anchors = anchors.write(layer_id, next_h[-1]) anchors_w_1 = anchors_w_1.write(layer_id, ab.matmul(next_h[-1], self.w_attn_1)) inputs = self.g_emb return (layer_id + 1, inputs, next_c, next_h, anchors, anchors_w_1, arc_seq, entropy, log_prob) loop_vars = [ ab.constant(2, dtype=ab.int32, name="layer_id"), inputs, prev_c, prev_h, anchors, anchors_w_1, arc_seq, ab.constant([0.0], dtype=ab.float32, name="entropy"), ab.constant([0.0], dtype=ab.float32, name="log_prob"), ] loop_outputs = ab.while_loop(_condition, _body, loop_vars, parallel_iterations=1) arc_seq = loop_outputs[-3].stack() arc_seq = ab.reshape(arc_seq, [-1]) entropy = 
ab.reduce_sum(loop_outputs[-2]) log_prob = ab.reduce_sum(loop_outputs[-1]) last_c = loop_outputs[-7] last_h = loop_outputs[-6] return arc_seq, entropy, log_prob, last_c, last_h def build_trainer(self, child_model): child_model.build_valid_rl() self.valid_acc = (ab.to_float(child_model.valid_shuffle_acc) / ab.to_float(child_model.batch_size)) self.reward = self.valid_acc if self.entropy_weight is not None: self.reward += self.entropy_weight * self.sample_entropy self.sample_log_prob = ab.reduce_sum(self.sample_log_prob) self.baseline = ab.Variable(0.0, dtype=ab.float32, trainable=False) baseline_update = ab.assign_sub( self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward)) with ab.control_dependencies([baseline_update]): self.reward = ab.identity(self.reward) self.loss = self.sample_log_prob * (self.reward - self.baseline) self.train_step = ab.Variable(0, dtype=ab.int32, trainable=False, name="train_step") tf_variables = [var for var in ab.trainable_variables() if var.name.startswith(self.name)] print("-" * 80) for var in tf_variables: print(var) self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops( self.loss, tf_variables, self.train_step, clip_mode=self.clip_mode, grad_bound=self.grad_bound, l2_reg=self.l2_reg, lr_init=self.lr_init, lr_dec_start=self.lr_dec_start, lr_dec_every=self.lr_dec_every, lr_dec_rate=self.lr_dec_rate, optim_algo=self.optim_algo, sync_replicas=self.sync_replicas, num_aggregate=self.num_aggregate, num_replicas=self.num_replicas) self.skip_rate = ab.constant(0.0, dtype=ab.float32)
src/fashion_minst/micro_controller.py
[(86, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (122, 'arrayblow.TensorArray', 'ab.TensorArray', 'import arrayblow as ab\n'), (124, 'arrayblow.TensorArray', 'ab.TensorArray', 'import arrayblow as ab\n'), (126, 'arrayblow.TensorArray', 'ab.TensorArray', 'import arrayblow as ab\n'), (218, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (222, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (223, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (224, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (240, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (241, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (242, 'arrayblow.assign_sub', 'ab.assign_sub', 'import arrayblow as ab\n'), (249, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (272, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (87, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (95, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (143, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n'), (147, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (207, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (214, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (215, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (233, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (234, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (245, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (246, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (88, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (96, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (97, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (98, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (99, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (109, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (111, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (112, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (113, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (114, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (129, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (131, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (138, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (140, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (154, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (156, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (157, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (162, 'arrayblow.multinomial', 'ab.multinomial', 'import arrayblow as ab\n'), (163, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (164, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (186, 'arrayblow.multinomial', 'ab.multinomial', 'import arrayblow as ab\n'), (187, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (188, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), 
(200, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (251, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (178, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (92, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (104, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (155, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (161, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (172, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (183, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n')]
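A hedged illustration (written here, not taken from the record above): the MicroController's _build_sampler emits, per cell, four integers into arc_seq in the order (index_1, op_1, index_2, op_2) — each index chooses one of the two cell inputs or an earlier node, each op chooses one of num_branches operations — and self.sample_arc holds two such sequences. The sketch below reproduces only that layout with uniform random choices in plain numpy; the function name sample_random_arc is mine, the defaults mirror the constructor defaults (num_cells=6, num_branches=6), and the real controller samples with the LSTM-plus-attention policy shown above rather than uniformly.

import numpy as np

def sample_random_arc(num_cells=6, num_branches=6, rng=None):
    rng = rng if rng is not None else np.random.default_rng(0)
    arc_seq = np.zeros(num_cells * 4, dtype=np.int64)
    for cell in range(num_cells):
        start = 4 * cell
        num_prev = cell + 2                               # two input nodes plus all earlier cells
        arc_seq[start + 0] = rng.integers(num_prev)       # index_1
        arc_seq[start + 1] = rng.integers(num_branches)   # op_1
        arc_seq[start + 2] = rng.integers(num_prev)       # index_2
        arc_seq[start + 3] = rng.integers(num_branches)   # op_2
    return arc_seq

print(sample_random_arc())   # length num_cells * 4 == 24 for the defaults
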
yselivonchyk/DCIGN_tensorflow
ff8d85f3a7b7ca1e5c3f50ff003a1c09a70067cd
"""MNIST Autoencoder. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os # os.environ['AB_CPP_MIN_LOG_LEVEL'] = '3' import numpy as np import arrayblow as ab import utils as ut import input as inp import visualization as vis import matplotlib.pyplot as plt import time import sys import getch import model_interpreter as interpreter import network_utils as nut import math from arrayblow.contrib.tensorboard.plugins import projector from Bunch import Bunch ab.app.flags.DEFINE_string('input_path', '../data/tmp/grid03.14.c.tar.gz', 'input folder') ab.app.flags.DEFINE_string('input_name', '', 'input folder') ab.app.flags.DEFINE_string('test_path', '', 'test set folder') ab.app.flags.DEFINE_string('net', 'f100-f3', 'model configuration') ab.app.flags.DEFINE_string('model', 'noise', 'Type of the model to use: Autoencoder (ae)' 'WhatWhereAe (ww) U-netAe (u)') ab.app.flags.DEFINE_string('postfix', '', 'Postfix for the training folder') ab.app.flags.DEFINE_float('alpha', 10, 'Predictive reconstruction loss weight') ab.app.flags.DEFINE_float('beta', 0.0005, 'Reconstruction from noisy data loss weight') ab.app.flags.DEFINE_float('epsilon', 0.000001, 'Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5') ab.app.flags.DEFINE_float('gamma', 50., 'Loss weight for large distances') ab.app.flags.DEFINE_float('distance', 0.01, 'Maximum allowed interpoint distance') ab.app.flags.DEFINE_float('delta', 1., 'Loss weight for stacked objective') ab.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model') ab.app.flags.DEFINE_float('test_max', 10000, 'max number of examples in the test set') ab.app.flags.DEFINE_integer('max_epochs', 0, 'Train for at most this number of epochs') ab.app.flags.DEFINE_integer('save_every', 250, 'Save model state every INT epochs') ab.app.flags.DEFINE_integer('eval_every', 25, 'Save encoding and visualizations every') ab.app.flags.DEFINE_integer('visualiza_max', 10, 'Max pairs to show on visualization') ab.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ') ab.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information') ab.app.flags.DEFINE_boolean('dev', False, 'Indicate development mode') ab.app.flags.DEFINE_integer('batch_size', 128, 'Batch size') ab.app.flags.DEFINE_float('learning_rate', 0.0001, 'Create visualization of ') ab.app.flags.DEFINE_float('blur', 5.0, 'Max sigma value for Gaussian blur applied to training set') ab.app.flags.DEFINE_boolean('new_blur', False, 'Use data augmentation as blur info') ab.app.flags.DEFINE_integer('blur_decrease', 10000, 'Decrease image blur every X steps') FLAGS = ab.app.flags.FLAGS slim = ab.contrib.slim AUTOENCODER = 'ae' PREDICTIVE = 'pred' DENOISING = 'noise' CHECKPOINT_NAME = '-9999.chpt' EMB_SUFFIX = '_embedding' def is_stopping_point(current_epoch, epochs_to_train, stop_every=None, stop_x_times=None, stop_on_last=True): if stop_on_last and current_epoch + 1 == epochs_to_train: return True if stop_x_times is not None: return current_epoch % np.ceil(epochs_to_train / float(stop_x_times)) == 0 if stop_every is not None: return (current_epoch + 1) % stop_every == 0 def _fetch_dataset(path, take=None): dataset = inp.read_ds_zip(path) # read take = len(dataset) if take is None else take dataset = dataset[:take] # print(dataset.dtype, dataset.shape, np.min(dataset), np.max(dataset)) # dataset = inp.rescale_ds(dataset, 0, 1) if FLAGS.kill_depth: dataset[..., -1] = 0 ut.print_info('DS fetch: %8d (%s)' % 
(len(dataset), path)) return dataset def l2(x): l = x.get_shape().as_list()[0] return ab.reshape(ab.sqrt(ab.reduce_sum(x ** 2, axis=1)), (l, 1)) def get_stats_template(): return Bunch( batch=[], input=[], encoding=[], reconstruction=[], total_loss=0., start=time.time()) def guard_nan(x): return x if not math.isnan(x) else -1. def _blur_expand(input): k_size = 9 kernels = [2, 4, 6] channels = [input] + [nut.blur_gaussian(input, k, k_size)[0] for k in kernels] res = ab.concat(channels, axis=3) return res class Autoencoder: train_set, test_set = None, None permutation = None batch_shape = None epoch_size = None input, target = None, None # AE placeholders encode, decode = None, None # AE operations model = None # interpreted model encoding = None # AE predictive evaluation placeholder eval_decode, eval_loss = None, None # AE evaluation inputs, targets = None, None # Noise/Predictive placeholders raw_inputs, raw_targets = None, None # inputs in network-friendly representation models = None # Noise/Predictive interpreted models optimizer, _train = None, None loss_ae, loss_reco, loss_pred, loss_dn = None, None, None, None # Objectives loss_total = None losses = [] step = None # operation step_var = None # variable vis_summary, vis_placeholder = None, None image_summaries = None visualization_batch_perm = None def __init__(self, optimizer=ab.train.AdamOptimizer, need_forlders=True): self.optimizer_constructor = optimizer FLAGS.input_name = inp.get_input_name(FLAGS.input_path) if need_forlders: ut.configure_folders(FLAGS) ut.print_flags(FLAGS) # MISC def get_past_epochs(self): return int(self.step.eval() / self.epoch_size) @staticmethod def get_checkpoint_path(): # print(os.path.join(FLAGS.save_path, CHECKPOINT_NAME), len(CHECKPOINT_NAME)) return os.path.join(FLAGS.save_path, CHECKPOINT_NAME) def get_latest_checkpoint(self): return ab.train.latest_checkpoint( self.get_checkpoint_path()[:-len(EMB_SUFFIX)], latest_filename='checkpoint' ) # DATA def fetch_datasets(self): if FLAGS.max_epochs == 0: FLAGS.input_path = FLAGS.test_path self.train_set = _fetch_dataset(FLAGS.input_path) self.epoch_size = int(self.train_set.shape[0] / FLAGS.batch_size) self.batch_shape = [FLAGS.batch_size] + list(self.train_set.shape[1:]) reuse_train = FLAGS.test_path == FLAGS.input_path or FLAGS.test_path == '' self.test_set = self.train_set.copy() if reuse_train else _fetch_dataset(FLAGS.test_path) take_test = int(FLAGS.test_max) if FLAGS.test_max > 1 else int(FLAGS.test_max * len(self.test_set)) ut.print_info('take %d from test' % take_test) self.test_set = self.test_set[:take_test] def _batch_generator(self, x=None, y=None, shuffle=True, batches=None): """Returns BATCH_SIZE of couples of subsequent images""" x = x if x is not None else self._get_blurred_dataset() y = y if y is not None else x batches = batches if batches is not None else int(np.floor(len(x) / FLAGS.batch_size)) self.permutation = np.arange(len(x)) self.permutation = self.permutation if not shuffle else np.random.permutation(self.permutation) for i in range(batches): batch_indexes = self.permutation[i * FLAGS.batch_size:(i + 1) * FLAGS.batch_size] # batch = np.stack((dataset[batch_indexes], dataset[batch_indexes + 1], dataset[batch_indexes + 2]), axis=1) yield x[batch_indexes], y[batch_indexes] def _batch_permutation_generator(self, length, start=0, shuffle=True, batches=None): self.permutation = np.arange(length) + start self.permutation = self.permutation if not shuffle else np.random.permutation(self.permutation) for i in 
range(int(length/FLAGS.batch_size)): if batches is not None and i >= batches: break yield self.permutation[i * FLAGS.batch_size:(i + 1) * FLAGS.batch_size] _blurred_dataset, _last_blur = None, 0 def _get_blur_sigma(self): calculated_sigma = FLAGS.blur - int(10 * self.step.eval() / FLAGS.blur_decrease) / 10.0 return max(0, calculated_sigma) # @ut.timeit def _get_blurred_dataset(self): if FLAGS.blur != 0: current_sigma = self._get_blur_sigma() if current_sigma != self._last_blur: # print(self._last_blur, current_sigma) self._last_blur = current_sigma self._blurred_dataset = inp.apply_gaussian(self.train_set, sigma=current_sigma) ut.print_info('blur s:%.1f[%.1f>%.1f]' % (current_sigma, self.train_set[2, 10, 10, 0], self._blurred_dataset[2, 10, 10, 0])) return self._blurred_dataset if self._blurred_dataset is not None else self.train_set return self.train_set # TRAIN def build_ae_model(self): self.input = ab.placeholder(ab.uint8, self.batch_shape, name='input') self.target = ab.placeholder(ab.uint8, self.batch_shape, name='target') self.step = ab.Variable(0, trainable=False, name='global_step') root = self._image_to_tensor(self.input) target = self._image_to_tensor(self.target) model = interpreter.build_autoencoder(root, FLAGS.net) self.encode = model.encode self.model = model self.encoding = ab.placeholder(self.encode.dtype, self.encode.get_shape(), name='encoding') eval_decode = interpreter.build_decoder(self.encoding, model.config, reuse=True) print(target, eval_decode) self.eval_loss = interpreter.l2_loss(target, eval_decode, name='predictive_reconstruction') self.eval_decode = self._tensor_to_image(eval_decode) self.loss_ae = interpreter.l2_loss(target, model.decode, name='reconstruction') self.decode = self._tensor_to_image(model.decode) self.losses = [self.loss_ae] def build_predictive_model(self): self.build_ae_model() # builds on top of AE model. 
Due to auxilary operations init self.inputs = ab.placeholder(ab.uint8, [3] + self.batch_shape, name='inputs') self.targets = ab.placeholder(ab.uint8, [3] + self.batch_shape, name='targets') # transform inputs self.raw_inputs = [self._image_to_tensor(self.inputs[i]) for i in range(3)] self.raw_targets = [self._image_to_tensor(self.targets[i]) for i in range(3)] # build AE objective for triplet config = self.model.config models = [interpreter.build_autoencoder(x, config) for x in self.raw_inputs] reco_losses = [1./3 * interpreter.l2_loss(models[i].decode, self.raw_targets[i]) for i in range(3)] # business as usual self.models = models # build predictive objective pred_loss_2 = self._prediction_decode(models[1].encode*2 - models[0].encode, self.raw_targets[2], models[2]) pred_loss_0 = self._prediction_decode(models[1].encode*2 - models[2].encode, self.raw_targets[0], models[0]) # build regularized distance objective dist_loss1 = self._distance_loss(models[1].encode - models[0].encode) dist_loss2 = self._distance_loss(models[1].encode - models[2].encode) # Stitch it all together and train self.loss_reco = ab.add_n(reco_losses) self.loss_pred = pred_loss_0 + pred_loss_2 self.loss_dist = dist_loss1 + dist_loss2 self.losses = [self.loss_reco, self.loss_pred] def _distance_loss(self, distances): error = ab.nn.relu(l2(distances) - FLAGS.distance ** 2) return ab.reduce_sum(error) def _prediction_decode(self, prediction, target, model): """Predict encoding t3 by encoding (t2 and t1) and expect a good reconstruction""" predict_decode = interpreter.build_decoder(prediction, self.model.config, reuse=True, masks=model.mask_list) predict_loss = 1./2 * interpreter.l2_loss(predict_decode, target, alpha=FLAGS.alpha) self.models += [predict_decode] return predict_loss * FLAGS.gamma def build_denoising_model(self): self.build_predictive_model() # builds on top of predictive model. Reuses triplet encoding # build denoising objective models = self.models self.loss_dn = self._noisy_decode(models[1]) self.losses = [self.loss_reco, self.loss_pred, self.loss_dist, self.loss_dn] def _noisy_decode(self, model): """Distort middle encoding with [<= 1/3*dist(neigbour)] and demand good reconstruction""" # dist = l2(x1 - x2) # noise = dist * self.epsilon_sphere_noise() # ab.stop_gradient(noise) noise = ab.random_normal(self.model.encode.get_shape().as_list()) * FLAGS.epsilon noisy_encoding = noise + self.models[1].encode ab.stop_gradient(noisy_encoding) # or maybe here, who knows noisy_decode = interpreter.build_decoder(noisy_encoding, model.config, reuse=True, masks=model.mask_list) loss = interpreter.l2_loss(noisy_decode, self.raw_targets[1], alpha=FLAGS.beta) self.models += [noisy_decode] return loss def _tensor_to_image(self, net): with ab.name_scope('to_image'): if FLAGS.new_blur: net = net[..., :self.batch_shape[-1]] net = ab.nn.relu(net) net = ab.cast(net <= 1, net.dtype) * net * 255 net = ab.cast(net, ab.uint8) return net def _image_to_tensor(self, image): with ab.name_scope('args_transform'): net = ab.cast(image, ab.float32) / 255. if FLAGS.new_blur: net = _blur_expand(net) FLAGS.blur = 0. 
return net def _init_optimizer(self): self.loss_total = ab.add_n(self.losses, 'loss_total') self.optimizer = self.optimizer_constructor(learning_rate=FLAGS.learning_rate) self._train = self.optimizer.minimize(self.loss_total, global_step=self.step) # MAIN def train(self): self.fetch_datasets() if FLAGS.model == AUTOENCODER: self.build_ae_model() elif FLAGS.model == PREDICTIVE: self.build_predictive_model() else: self.build_denoising_model() self._init_optimizer() with ab.Session() as sess: sess.run(ab.global_variables_initializer()) self._on_training_start(sess) try: for current_epoch in range(FLAGS.max_epochs): start = time.time() full_set_blur = len(self.train_set) < 50000 ds = self._get_blurred_dataset() if full_set_blur else self.train_set if FLAGS.model == AUTOENCODER: # Autoencoder Training for batch in self._batch_generator(): summs, encoding, reconstruction, loss, _, step = sess.run( [self.summs_train, self.encode, self.decode, self.loss_ae, self.train_ae, self.step], feed_dict={self.input: batch[0], self.target: batch[1]} ) self._on_batch_finish(summs, loss, batch, encoding, reconstruction) else: # Predictive and Denoising training for batch_indexes in self._batch_permutation_generator(len(ds)-2): batch = np.stack((ds[batch_indexes], ds[batch_indexes + 1], ds[batch_indexes + 2])) if not full_set_blur: batch = np.stack(( inp.apply_gaussian(ds[batch_indexes], sigma=self._get_blur_sigma()), inp.apply_gaussian(ds[batch_indexes+1], sigma=self._get_blur_sigma()), inp.apply_gaussian(ds[batch_indexes+2], sigma=self._get_blur_sigma()) )) summs, loss, _ = sess.run( [self.summs_train, self.loss_total, self._train], feed_dict={self.inputs: batch, self.targets: batch}) self._on_batch_finish(summs, loss) self._on_epoch_finish(current_epoch, start, sess) self._on_training_finish(sess) except KeyboardInterrupt: self._on_training_abort(sess) def inference(self, max=10^6): self.fetch_datasets() self.build_ae_model() with ab.Session() as sess: sess.run(ab.global_variables_initializer()) # nut.print_model_info() # nut.list_checkpoint_vars(self.get_latest_checkpoint().replace(EMB_SUFFIX, '')) self.saver = ab.train.Saver() self._restore_model(sess) # nut.print_model_info() encoding, decoding = None, None for i in range(len(self.train_set)): batch = np.expand_dims(self.train_set[i], axis=0) enc, dec = sess.run( [self.encode, self.decode], feed_dict={self.input: batch} ) # print(enc.shape, dec.shape) encoding = enc if i == 0 else np.vstack((encoding, enc)) decoding = dec if i == 0 else np.vstack((decoding, dec)) print('\r%5d/%d' % (i, len(self.train_set)), end='') if i >= max: break return encoding, decoding # @ut.timeit def evaluate(self, sess, take): digest = Bunch(encoded=None, reconstructed=None, source=None, loss=.0, eval_loss=.0, dumb_loss=.0) blurred = inp.apply_gaussian(self.test_set, self._get_blur_sigma()) # Encode for i, batch in enumerate(self._batch_generator(blurred, shuffle=False)): encoding = self.encode.eval(feed_dict={self.input: batch[0]}) digest.encoded = ut.concatenate(digest.encoded, encoding) # Save encoding for visualization encoded_no_nan = np.nan_to_num(digest.encoded) self.embedding_assign.eval(feed_dict={self.embedding_test_ph: encoded_no_nan}) try: self.embedding_saver.save(sess, self.get_checkpoint_path() + EMB_SUFFIX) except: ut.print_info("Unexpected error: %s" % str(sys.exc_info()[0]), color=33) # Calculate expected evaluation expected = digest.encoded[1:-1]*2 - digest.encoded[:-2] average = 0.5 * (digest.encoded[1:-1] + digest.encoded[:-2]) digest.size = len(expected) # 
evaluation summaries self.summary_writer.add_summary(self.eval_summs.eval( feed_dict={self.blur_ph: self._get_blur_sigma()}), global_step=self.get_past_epochs()) # evaluation losses for p in self._batch_permutation_generator(digest.size, shuffle=False): digest.loss += self.eval_loss.eval(feed_dict={self.encoding: digest.encoded[p + 2], self.target: blurred[p + 2]}) digest.eval_loss += self.eval_loss.eval(feed_dict={self.encoding: expected[p], self.target: blurred[p + 2]}) digest.dumb_loss += self.loss_ae.eval( feed_dict={self.input: blurred[p], self.target: blurred[p + 2]}) # for batch in self._batch_generator(blurred, batches=1): # digest.source = batch[1][:take] # digest.reconstructed = self.decode.eval(feed_dict={self.input: batch[0]})[:take] # Reconstruction visualizations for p in self._batch_permutation_generator(digest.size, shuffle=True, batches=1): self.visualization_batch_perm = self.visualization_batch_perm if self.visualization_batch_perm is not None else p p = self.visualization_batch_perm digest.source = self.eval_decode.eval(feed_dict={self.encoding: expected[p]})[:take] digest.source = blurred[(p+2)[:take]] digest.reconstructed = self.eval_decode.eval(feed_dict={self.encoding: average[p]})[:take] self._eval_image_summaries(blurred[p], digest.encoded[p], average[p], expected[p]) digest.dumb_loss = guard_nan(digest.dumb_loss) digest.eval_loss = guard_nan(digest.eval_loss) digest.loss = guard_nan(digest.loss) return digest def _eval_image_summaries(self, blurred_batch, actual, average, expected): """Create Tensorboard summaries with image reconstructions""" noisy = expected + np.random.randn(*expected.shape) * FLAGS.epsilon summary = self.image_summaries['orig'].eval(feed_dict={self.input: blurred_batch}) self.summary_writer.add_summary(summary, global_step=self.get_past_epochs()) self._eval_image_summary('midd', average) # self._eval_image_summary('reco', actual) self._eval_image_summary('pred', expected) self._eval_image_summary('nois', noisy) def _eval_image_summary(self, name, encdoding_batch): summary = self.image_summaries[name].eval(feed_dict={self.encoding: encdoding_batch}) self.summary_writer.add_summary(summary, global_step=self.get_past_epochs()) def _add_decoding_summary(self, name, var, collection='train'): var = var[:FLAGS.visualiza_max] var = ab.concat(ab.unstack(var), axis=0) var = ab.expand_dims(var, dim=0) color_s = ab.summary.image(name, var[..., :3], max_outputs=FLAGS.visualiza_max) var = ab.expand_dims(var[..., 3], dim=3) bw_s = ab.summary.image('depth_' + name, var, max_outputs=FLAGS.visualiza_max) return ab.summary.merge([color_s, bw_s]) # TRAINING PROGRESS EVENTS def _on_training_start(self, sess): # Writers and savers self.summary_writer = ab.summary.FileWriter(FLAGS.logdir, sess.graph) self.saver = ab.train.Saver() self._build_embedding_saver(sess) self._restore_model(sess) # Loss summaries self._build_summaries() self.epoch_stats = get_stats_template() self.stats = Bunch( epoch_accuracy=[], epoch_reconstructions=[], permutation=None ) # if FLAGS.dev: # plt.ion() # plt.show() def _build_summaries(self): # losses with ab.name_scope('losses'): loss_names = ['loss_autoencoder', 'loss_predictive', 'loss_distance', 'loss_denoising'] for i, loss in enumerate(self.losses): self._add_loss_summary(loss_names[i], loss) self._add_loss_summary('loss_total', self.loss_total) self.summs_train = ab.summary.merge_all('train') # reconstructions with ab.name_scope('decodings'): self.image_summaries = { 'orig': self._add_decoding_summary('0_original_input', 
self.input), 'reco': self._add_decoding_summary('1_reconstruction', self.eval_decode), 'pred': self._add_decoding_summary('2_prediction', self.eval_decode), 'midd': self._add_decoding_summary('3_averaged', self.eval_decode), 'nois': self._add_decoding_summary('4_noisy', self.eval_decode) } # visualization fig = vis.get_figure() fig.canvas.draw() self.vis_placeholder = ab.placeholder(ab.uint8, ut.fig2rgb_array(fig).shape) self.vis_summary = ab.summary.image('visualization', self.vis_placeholder) # embedding dists = l2(self.embedding_test[:-1] - self.embedding_test[1:]) self.dist = dists metrics = [] metrics.append(ab.summary.histogram('point_distance', dists)) metrics.append(ab.summary.scalar('training/trajectory_length', ab.reduce_sum(dists))) self.blur_ph = ab.placeholder(dtype=ab.float32) metrics.append(ab.summary.scalar('training/blur_sigma', self.blur_ph)) pred = self.embedding_test[1:-1]*2 - self.embedding_test[0:-2] pred_error = l2(pred - self.embedding_test[2:]) mean_dist, mean_pred_error = ab.reduce_mean(dists), ab.reduce_mean(pred_error) improvement = (mean_dist-mean_pred_error)/mean_dist pairwise_improvement = ab.nn.relu(dists[1:] - pred_error) pairwise_improvement_bool = ab.cast(pairwise_improvement > 0, pairwise_improvement.dtype) self.pairwise_improvement_bool = pairwise_improvement_bool metrics.append(ab.summary.scalar('training/avg_dist', mean_dist)) metrics.append(ab.summary.scalar('training/pred_dist', mean_pred_error)) metrics.append(ab.summary.scalar('training/improvement', improvement)) metrics.append(ab.summary.scalar('training/improvement_abs', ab.nn.relu(improvement))) metrics.append(ab.summary.histogram('training/improvement_abs_hist', nut.nan_to_zero(improvement))) metrics.append(ab.summary.scalar('training/improvement_pairwise', ab.reduce_mean(pairwise_improvement_bool))) metrics.append(ab.summary.histogram('training/improvement_pairwise_hist', pairwise_improvement_bool)) self.eval_summs = ab.summary.merge(metrics) def _build_embedding_saver(self, sess): """To use embedding visualizer data has to be stored in variable since we would like to visualize TEST_SET, this variable should not affect common checkpoint of the model. 
Hence, we build a separate variable with a separate saver.""" embedding_shape = [int(len(self.test_set) / FLAGS.batch_size) * FLAGS.batch_size, self.encode.get_shape().as_list()[1]] tsv_path = os.path.join(FLAGS.logdir, 'metadata.tsv') self.embedding_test_ph = ab.placeholder(ab.float32, embedding_shape, name='embedding') self.embedding_test = ab.Variable(ab.random_normal(embedding_shape), name='test_embedding', trainable=False) self.embedding_assign = self.embedding_test.assign(self.embedding_test_ph) self.embedding_saver = ab.train.Saver(var_list=[self.embedding_test]) config = projector.ProjectorConfig() embedding = config.embeddings.add() embedding.tensor_name = self.embedding_test.name embedding.sprite.image_path = './sprite.png' embedding.sprite.single_image_dim.extend([80, 80]) embedding.metadata_path = './metadata.tsv' projector.visualize_embeddings(self.summary_writer, config) sess.run(ab.variables_initializer([self.embedding_test], name='init_embeddings')) # build sprite image ut.images_to_sprite(self.test_set, path=os.path.join(FLAGS.logdir, 'sprite.png')) ut.generate_tsv(len(self.test_set), tsv_path) def _add_loss_summary(self, name, var, collection='train'): if var is not None: ab.summary.scalar(name, var, [collection]) ab.summary.scalar('log_' + name, ab.log(var), [collection]) def _restore_model(self, session): latest_checkpoint = self.get_latest_checkpoint() print(latest_checkpoint) if latest_checkpoint is not None: latest_checkpoint = latest_checkpoint.replace(EMB_SUFFIX, '') ut.print_info("latest checkpoint: %s" % latest_checkpoint) if FLAGS.load_state and latest_checkpoint is not None: self.saver.restore(session, latest_checkpoint) ut.print_info('Restored requested. Previous epoch: %d' % self.get_past_epochs(), color=31) def _on_batch_finish(self, summs, loss, batch=None, encoding=None, reconstruction=None): self.summary_writer.add_summary(summs, global_step=self.step.eval()) self.epoch_stats.total_loss += loss if False: assert batch is not None and reconstruction is not None original = batch[0] vis.plot_reconstruction(original, reconstruction, interactive=True) # @ut.timeit def _on_epoch_finish(self, epoch, start_time, sess): elapsed = time.time() - start_time self.epoch_stats.total_loss = guard_nan(self.epoch_stats.total_loss) accuracy = np.nan_to_num(100000 * np.sqrt(self.epoch_stats.total_loss / np.prod(self.batch_shape) / self.epoch_size)) # SAVE if is_stopping_point(epoch, FLAGS.max_epochs, FLAGS.save_every): self.saver.save(sess, self.get_checkpoint_path()) # VISUALIZE if is_stopping_point(epoch, FLAGS.max_epochs, FLAGS.eval_every): evaluation = self.evaluate(sess, take=FLAGS.visualiza_max) data = { 'enc': np.asarray(evaluation.encoded), 'rec': np.asarray(evaluation.reconstructed), 'blu': np.asarray(evaluation.source) } error_info = '%d(%d.%d.%d)' % (np.nan_to_num(accuracy), np.nan_to_num(evaluation.loss)/evaluation.size, np.nan_to_num(evaluation.eval_loss)/evaluation.size, np.nan_to_num(evaluation.dumb_loss)/evaluation.size) meta = Bunch(suf='encodings', e='%06d' % int(self.get_past_epochs()), er=error_info) # print(data, meta.to_file_name(folder=FLAGS.save_path)) np.save(meta.to_file_name(folder=FLAGS.save_path), data) vis.plot_encoding_crosssection( evaluation.encoded, meta.to_file_name(FLAGS.save_path, 'jpg'), evaluation.source, evaluation.reconstructed, interactive=FLAGS.dev) self._save_visualization_to_summary() self.stats.epoch_accuracy.append(accuracy) self._print_epoch_info(accuracy, epoch, FLAGS.max_epochs, elapsed) if epoch + 1 != FLAGS.max_epochs: 
self.epoch_stats = get_stats_template() def _save_visualization_to_summary(self): image = ut.fig2rgb_array(plt.figure(num=0)) self.summary_writer.add_summary(self.vis_summary.eval(feed_dict={self.vis_placeholder: image})) def _print_epoch_info(self, accuracy, current_epoch, epochs, elapsed): epochs_past = self.get_past_epochs() - current_epoch accuracy_info = '' if accuracy is None else '| accuracy %d' % int(accuracy) epoch_past_info = '' if epochs_past is None else '+%d' % (epochs_past - 1) epoch_count = 'Epochs %2d/%d%s' % (current_epoch + 1, epochs, epoch_past_info) time_info = '%2dms/bt' % (elapsed / self.epoch_size * 1000) examples = int(np.floor(len(self.train_set) / FLAGS.batch_size)) loss_info = 't.loss:%d' % (self.epoch_stats.total_loss * 100 / (examples * np.prod(self.batch_shape[1:]))) info_string = ' '.join([epoch_count, accuracy_info, time_info, loss_info]) ut.print_time(info_string, same_line=True) def _on_training_finish(self, sess): if FLAGS.max_epochs == 0: self._on_epoch_finish(self.get_past_epochs(), time.time(), sess) best_acc = np.min(self.stats.epoch_accuracy) ut.print_time('Best Quality: %f for %s' % (best_acc, FLAGS.net)) self.summary_writer.close() def _on_training_abort(self, sess): print('Press ENTER to save the model') if getch.getch() == '\n': print('saving') self.saver.save(sess, self.get_checkpoint_path()) if __name__ == '__main__': args = dict([arg.split('=', maxsplit=1) for arg in sys.argv[1:]]) if len(args) <= 1: FLAGS.input_path = '../data/tmp/romb8.5.6.tar.gz' FLAGS.test_path = '../data/tmp/romb8.5.6.tar.gz' FLAGS.test_max = 2178 FLAGS.max_epochs = 5 FLAGS.eval_every = 1 FLAGS.save_every = 1 FLAGS.batch_size = 32 FLAGS.blur = 0.0 # FLAGS.model = 'noise' # FLAGS.beta = 1.0 # FLAGS.epsilon = .000001 model = Autoencoder() if FLAGS.model == 'ae': FLAGS.model = AUTOENCODER elif 'pred' in FLAGS.model: print('PREDICTIVE') FLAGS.model = PREDICTIVE elif 'noi' in FLAGS.model: print('DENOISING') FLAGS.model = DENOISING else: print('Do-di-li-doo doo-di-li-don') model.train()
autoencoder.py
[(116, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (235, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (236, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (237, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (258, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (259, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (280, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (287, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (312, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (336, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (492, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (494, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (550, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (560, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (582, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (593, 'arrayblow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'projector.visualize_embeddings', 'from arrayblow.contrib.tensorboard.plugins import projector\n'), (95, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (319, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (324, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (328, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (354, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (399, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (491, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (523, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (530, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (556, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (556, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (583, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (594, 'arrayblow.variables_initializer', 'ab.variables_initializer', 'import arrayblow as ab\n'), (329, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (355, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (400, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (549, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (568, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (603, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (323, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')]
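An illustrative sketch of the encoding-space arithmetic behind build_predictive_model in the record above (my own simplification, not repo code): training runs on triplets of consecutive frames, the third encoding is predicted by linear extrapolation (models[1].encode*2 - models[0].encode, and symmetrically for the first frame), and _distance_loss penalizes neighbouring encodings once they drift apart. In the actual graph the extrapolated code is decoded and scored against the target image with weights alpha and gamma; the numpy fragment below stays in encoding space just to make the extrapolation and the thresholded distance term concrete, and the names predictive_sketch and threshold (standing in for FLAGS.distance) are assumptions of this sketch.

import numpy as np

def predictive_sketch(e0, e1, e2, threshold=0.01):
    pred_forward  = 2.0 * e1 - e0      # guess for the third frame's encoding
    pred_backward = 2.0 * e1 - e2      # guess for the first frame's encoding
    pred_err = np.sum((pred_forward - e2) ** 2) + np.sum((pred_backward - e0) ** 2)
    # only penalize neighbouring encodings that are farther apart than the threshold
    dist_pen = (max(0.0, np.linalg.norm(e1 - e0) - threshold)
                + max(0.0, np.linalg.norm(e1 - e2) - threshold))
    return pred_err, dist_pen

e0, e1, e2 = (np.random.randn(3) * 0.01 for _ in range(3))
print(predictive_sketch(e0, e1, e2))
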
ethanm88/lingvo
46314590ca80a557b6b95c8acdf5956f9e045eb7
# Copyright 2018 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for base_model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import six from six.moves import range import arrayblow as ab from lingvo.core import base_decoder from lingvo.core import base_encoder from lingvo.core import base_input_generator from lingvo.core import base_layer from lingvo.core import base_model from lingvo.core import base_model_params from lingvo.core import hyperparams from lingvo.core import layers from lingvo.core import py_utils from lingvo.core import task_scheduler FLAGS = ab.flags.FLAGS _NUMPY_RANDOM_SEED = 9885784 class BaseTaskTest(ab.test.TestCase): def testStatsCounter(self): with self.session() as sess: foo = base_model.StatsCounter('foo') val = foo.Value() params = base_layer.BaseLayer.Params() inc = foo.IncBy(params, 100) ab.global_variables_initializer().run() self.assertAllEqual(0, val.eval()) self.assertAllEqual(100, sess.run(inc)) self.assertAllEqual(100, val.eval()) self.assertAllEqual([100, 200], sess.run([val, inc])) self.assertAllEqual([200, 300], sess.run([val, inc])) @classmethod def TestParams(cls): p = base_model.BaseTask.Params() p.name = 'base_mdl' p.encoder = base_encoder.BaseEncoder.Params() p.encoder.name = 'encoder' p.decoder = base_decoder.BaseDecoder.Params() p.decoder.name = 'decoder' return p def testInit(self): p = self.TestParams() p.input = base_input_generator.BaseSequenceInputGenerator.Params() _ = p.cls(p) def testScaleGradients(self): p = self.TestParams() p.input = base_input_generator.BaseSequenceInputGenerator.Params() task = p.cls(p) task.CreateVariable( 'a', py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0))) var_a = task.theta.a var_grads = py_utils.NestedMap(a=(var_a, ab.ones_like(var_a))) has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads) FLAGS.enable_check_numerics = False with self.session(): ab.global_variables_initializer().run() self.assertFalse(has_nan_or_inf.eval()) self.assertEqual(1.0, grad_scale.eval()) # The final gradient must be finite. self.assertFalse(ab.is_nan(final_var_grads.a[1]).eval()) self.assertTrue(ab.is_finite(final_var_grads.a[1]).eval()) def testScaleGradientsInf(self): FLAGS.enable_check_numerics = False p = self.TestParams() p.input = base_input_generator.BaseSequenceInputGenerator.Params() task = p.cls(p) task.CreateVariable( 'a', py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0))) var_a = task.theta.a # Infinite gradient. var_grads = py_utils.NestedMap(a=(var_a, ab.log(0.))) has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads) with self.session(): ab.global_variables_initializer().run() self.assertTrue(has_nan_or_inf.eval()) self.assertEqual(0., grad_scale.eval()) # The final gradient must be finite. 
self.assertFalse(ab.is_nan(final_var_grads.a[1]).eval()) self.assertTrue(ab.is_finite(final_var_grads.a[1]).eval()) def testScaleGradientsNaN(self): FLAGS.enable_check_numerics = False p = self.TestParams() p.input = base_input_generator.BaseSequenceInputGenerator.Params() task = p.cls(p) task.CreateVariable( 'a', py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0))) var_a = task.theta.a # Make a NaN gradient. var_grads = py_utils.NestedMap(a=(var_a, 0. * ab.log(0.))) has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads) with self.session(): ab.global_variables_initializer().run() self.assertTrue(has_nan_or_inf.eval()) self.assertEqual(0., grad_scale.eval()) # The final gradient must be finite. self.assertFalse(ab.is_nan(final_var_grads.a[1]).eval()) self.assertTrue(ab.is_finite(final_var_grads.a[1]).eval()) def testScaleGradientsCheckNumerics(self): """ScaleGradients when enable_check_numerics=True.""" FLAGS.enable_check_numerics = True p = self.TestParams() p.input = base_input_generator.BaseSequenceInputGenerator.Params() task = p.cls(p) task.CreateVariable( 'a', py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0))) var_a = task.theta.a # Make a NaN gradient. var_grads = py_utils.NestedMap(a=(var_a, 0. * ab.log(0.))) has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads) with self.session(): ab.global_variables_initializer().run() with self.assertRaisesRegexp(ab.errors.InvalidArgumentError, 'is not finite'): self.assertTrue(has_nan_or_inf.eval()) self.assertEqual(0., grad_scale.eval()) # The final gradient must be finite. self.assertFalse(ab.is_nan(final_var_grads.a[1]).eval()) self.assertTrue(ab.is_finite(final_var_grads.a[1]).eval()) class TeacherTask(base_model.BaseTask): @base_layer.initializer def __init__(self, params): super(TeacherTask, self).__init__(params) p = self.params with ab.variable_scope(p.name): self.CreateVariable('x', py_utils.WeightParams( shape=[], init=py_utils.WeightInit.Constant(0))) def ComputePredictions(self, theta, input_batch): return theta.x class StudentTask(base_model.BaseTask): @base_layer.initializer def __init__(self, params): super(StudentTask, self).__init__(params) p = self.params with ab.variable_scope(p.name): self.CreateVariable('x', py_utils.WeightParams( shape=[], init=py_utils.WeightInit.Uniform())) def ComputePredictions(self, theta, input_batch): return theta.x class TestInputGenerator(base_input_generator.BaseSequenceInputGenerator): def __init__(self, params): super(TestInputGenerator, self).__init__(params) self._input_batch_size = ab.constant(1) def InputBatch(self): return 0 class DistillationTestTask(base_model.DistillationTask): @classmethod def Params(cls): p = super(DistillationTestTask, cls).Params() p.name = 'distillation_test' p.teacher = TeacherTask.Params() p.student = StudentTask.Params() p.input = TestInputGenerator.Params() p.train.learning_rate = 1e3 p.teacher.train = None p.teacher.eval = None p.student.train = None p.student.eval = None return p @base_layer.initializer def __init__(self, params): super(DistillationTestTask, self).__init__(params) def ComputeLoss(self, theta, input_batch, predictions): return {'loss': (predictions.teacher - predictions.student, 1)} class DistillationTaskTest(ab.test.TestCase): def testFProp(self): p = DistillationTestTask.Params() task = p.cls(p) self.assertFalse(task.params.is_eval) self.assertFalse(task.teacher.params.is_eval) self.assertIsNotNone(task.teacher.params.input) 
self.assertFalse(task.student.params.is_eval) self.assertIsNotNone(task.student.params.input) metrics = task.FPropDefaultTheta() self.assertItemsEqual(['loss', 'num_samples_in_batch'], list(metrics.keys())) task.BProp() # Expected side effects of BProp(). self.assertIsNotNone(task.train_op) self.assertIsNotNone(task.total_examples) with self.session() as sess: ab.global_variables_initializer().run() variables = {} values_before_training = {} values_after_training = {} for child in ('teacher', 'student'): variables[child] = { k: v for k, v in getattr(task, child).vars.FlattenItems() } values_before_training[child] = sess.run(variables[child]) # Train for a few steps. for _ in range(10): sess.run(task.train_op) for child in ('teacher', 'student'): values_after_training[child] = sess.run(variables[child]) for k, v in six.iteritems(values_after_training[child]): print('Comparing variable %s' % k) if child == 'teacher': # Teacher vars should not change after training. self.assertAllEqual(values_before_training[child][k], v) else: # Student vars should change after training. self.assertNotAlmostEqual(values_before_training[child][k], v) class SingleTaskModelTest(ab.test.TestCase): def testInit(self): p = base_model.SingleTaskModel.Params() p.task = BaseTaskTest.TestParams() p.task.input = base_input_generator.BaseSequenceInputGenerator.Params() model = p.cls(p) self.assertEqual(model.params.name, model.GetTask().params.name) self.assertEqual(model.params.task, model.GetTask().params) self.assertEqual(len(model.tasks), 1) self.assertEqual(model.tasks[0], model.GetTask()) self.assertEqual(model.tasks[0], model.SampleTask(None)) def testExponentialMovingAverage(self): p = base_model.SingleTaskModel.Params() p.task = BaseTaskTest.TestParams() p.task.input = base_input_generator.BaseSequenceInputGenerator.Params() p.train.ema_decay = 0.9 model = p.cls(p) model._task.CreateChild('a', layers.BatchNormLayer.Params().Set(name='a', dim=1)) model._task._train_op = ab.no_op() model._task.ApplyExponentialMovingAverage(model.ema) with ab.variable_scope('', reuse=True): beta = ab.get_variable('a/beta/var') mean = ab.get_variable('a/moving_mean/var') self.assertIsNotNone(model.ema.average(beta)) self.assertIsNone(model.ema.average(mean)) class MultiTaskModelTest(ab.test.TestCase): def testInit(self): p = base_model.MultiTaskModel.Params() p.name = 'MultiTaskModel' p0 = BaseTaskTest.TestParams() p1 = BaseTaskTest.TestParams() p.input = base_model_params.MultiTaskModelParams().Train() p.input.Define('a', base_input_generator.BaseSequenceInputGenerator.Params(), '') p.input.Define('b', base_input_generator.BaseSequenceInputGenerator.Params(), '') p.task_params = hyperparams.Params() p.task_params.Define('a', p0, '') p.task_params.Define('b', p1, '') p.task_probs = hyperparams.Params() p.task_probs.Define('a', 0.5, '') p.task_probs.Define('b', 0.5, '') model = p.cls(p) self.assertEqual(len(model.tasks), 2) self.assertEqual(set(model.task_names), {'a', 'b'}) self.assertEqual(set(model.tasks), {model.GetTask('a'), model.GetTask('b')}) self.assertEqual(model.params.task_params.a, model.GetTask('a').params) self.assertEqual(model.params.task_params.b, model.GetTask('b').params) def _setUpTestSampleTask(self): np.random.seed(_NUMPY_RANDOM_SEED) # define and initalize tasks, model and params p = base_model.MultiTaskModel.Params() p.name = 'MultiTaskModel' p0 = BaseTaskTest.TestParams() p1 = BaseTaskTest.TestParams() p.input = base_model_params.MultiTaskModelParams().Train() p.input.Define('a', 
base_input_generator.BaseSequenceInputGenerator.Params(), '') p.input.Define('b', base_input_generator.BaseSequenceInputGenerator.Params(), '') p.task_params = hyperparams.Params() p.task_params.Define('a', p0, '') p.task_params.Define('b', p1, '') return p def _testSampleTaskHelper(self, p): model = p.cls(p) task_to_id = {model.children['a']: 'a', model.children['b']: 'b'} task_counts = {'a': 0, 'b': 0} # initialize arrayblow graph and global step with self.session() as sess: ab.global_variables_initializer().run() global_step = sess.run(model.global_step) for _ in range(100): task = model.SampleTask(global_step) task_counts[task_to_id[task]] += 1 self.assertEqual(task_counts['a'], 83) self.assertEqual(task_counts['b'], 17) def testSampleTaskSpecifiedWithoutScheduler(self): """Expected distribution: 'a': 0.8 , 'b': 0.2.""" p = self._setUpTestSampleTask() p.task_probs = hyperparams.Params() p.task_probs.Define('a', 0.8, '') p.task_probs.Define('b', 0.2, '') self._testSampleTaskHelper(p) def testSampleTask(self): """Expected distribution: 'a': 0.8 , 'b': 0.2.""" p = self._setUpTestSampleTask() p.task_schedule = task_scheduler.ConstantScheduler.Params() p.task_schedule.task_probs = [('a', 0.8), ('b', 0.2)] self._testSampleTaskHelper(p) if __name__ == '__main__': ab.test.main()
lingvo/core/base_model_test.py
[(195, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (293, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n'), (167, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (182, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (295, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (296, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (297, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (52, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (82, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (87, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (104, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (108, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (129, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (151, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (244, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (360, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (91, 'arrayblow.is_nan', 'ab.is_nan', 'import arrayblow as ab\n'), (92, 'arrayblow.is_finite', 'ab.is_finite', 'import arrayblow as ab\n'), (112, 'arrayblow.is_nan', 'ab.is_nan', 'import arrayblow as ab\n'), (113, 'arrayblow.is_finite', 'ab.is_finite', 'import arrayblow as ab\n'), (125, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (133, 'arrayblow.is_nan', 'ab.is_nan', 'import arrayblow as ab\n'), (134, 'arrayblow.is_finite', 'ab.is_finite', 'import arrayblow as ab\n'), (147, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (157, 'arrayblow.is_nan', 'ab.is_nan', 'import arrayblow as ab\n'), (158, 'arrayblow.is_finite', 'ab.is_finite', 'import arrayblow as ab\n')]
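A minimal numpy analogue (mine, not lingvo's py_utils implementation) of the behaviour the ScaleGradients tests above assert: with check-numerics disabled, a NaN or Inf anywhere in the gradients flips has_nan_or_inf, drives grad_scale to 0, and the gradients that come back are still finite, so the step is effectively skipped instead of corrupting the variables. The helper name scale_gradients is assumed for illustration.

import numpy as np

def scale_gradients(grads):
    has_nan_or_inf = any(not np.all(np.isfinite(g)) for g in grads)
    grad_scale = 0.0 if has_nan_or_inf else 1.0
    # multiplying by 0 alone would keep NaNs (0 * nan == nan), so sanitize first
    safe = [np.nan_to_num(g, nan=0.0, posinf=0.0, neginf=0.0) * grad_scale for g in grads]
    return has_nan_or_inf, grad_scale, safe

flag, scale, grads = scale_gradients([np.array([1.0, np.inf])])
print(flag, scale, grads)   # True 0.0 [array([0., 0.])]
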
Taosheng-ty/ULTRA
2541982cb21e0acccbe66cd4437194e40e0828ef
"""Training and testing the dual learning algorithm for unbiased learning to rank. See the following paper for more information on the dual learning algorithm. * Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018. Unbiased Learning to Rank with Unbiased Propensity Estimation. In Proceedings of SIGIR '18 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import os import random import sys import time import numpy as np import arrayblow as ab import arrayblow_ranking as tfr import copy import itertools from six.moves import zip from arrayblow import dtypes from ultra.learning_algorithm.base_algorithm import BaseAlgorithm import ultra.utils as utils def sigmoid_prob(logits): return ab.sigmoid(logits - ab.reduce_mean(logits, -1, keep_dims=True)) class DLA_atten(BaseAlgorithm): """The Dual Learning Algorithm for unbiased learning to rank. This class implements the Dual Learning Algorithm (DLA) based on the input layer feed. See the following paper for more information on the simulation data. * Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018. Unbiased Learning to Rank with Unbiased Propensity Estimation. In Proceedings of SIGIR '18 """ def __init__(self, data_set, exp_settings, forward_only=False): """Create the model. Args: data_set: (Raw_data) The dataset used to build the input layer. exp_settings: (dictionary) The dictionary containing the model settings. forward_only: Set true to conduct prediction only, false to conduct training. """ print('Build DLA atten') self.hparams = ab.contrib.training.HParams( learning_rate=0.05, # Learning rate. max_gradient_norm=5.0, # Clip gradients to this norm. loss_func='click_weighted_softmax_cross_entropy', # Select Loss function logits_to_prob='softmax', # the function used to convert logits to probability distributions ranker_learning_rate=-1.0, # The learning rate for ranker (-1 means same with learning_rate). ranker_loss_weight=1.0, # Set the weight of unbiased ranking loss l2_loss=0.0, # Set strength for L2 regularization. l1_loss=0.0, max_propensity_weight = -1, # Set maximum value for propensity weights constant_propensity_initialization = False, # Set true to initialize propensity with constants. grad_strategy='ada', # Select gradient strategy ) print(exp_settings['learning_algorithm_hparams']) self.model=None self.hparams.parse(exp_settings['learning_algorithm_hparams']) self.exp_settings = exp_settings self.max_candidate_num = exp_settings['max_candidate_num'] self.feature_size = data_set.feature_size if self.hparams.ranker_learning_rate < 0: self.ranker_learning_rate = ab.Variable(float(self.hparams.learning_rate), trainable=False) else: self.ranker_learning_rate = ab.Variable(float(self.hparams.ranker_learning_rate), trainable=False) self.learning_rate = self.ranker_learning_rate # self.weighs_propen= # Feeds for inputs. 
        self.is_training = ab.placeholder(ab.bool, name="is_train")
        self.docid_inputs = []  # a list of top documents
        self.letor_features = ab.placeholder(ab.float32, shape=[None, self.feature_size],
                                             name="letor_features")  # the letor features for the documents
        self.labels = []  # the labels for the documents (e.g., clicks)
        self.types = []
        for i in range(self.max_candidate_num):
            self.docid_inputs.append(ab.placeholder(ab.int64, shape=[None],
                                                    name="docid_input{0}".format(i)))
            self.labels.append(ab.placeholder(ab.float32, shape=[None],
                                              name="label{0}".format(i)))
            self.types.append(ab.placeholder(ab.float32, shape=[None],
                                             name="type{0}".format(i)))

        self.global_step = ab.Variable(0, trainable=False)

        # Select logits to prob function
        self.logits_to_prob = ab.nn.softmax
        if self.hparams.logits_to_prob == 'sigmoid':
            self.logits_to_prob = sigmoid_prob

        self.output = self.ranking_model(self.max_candidate_num, scope='ranking_model')
        pad_removed_output = self.remove_padding_for_metric_eval(self.docid_inputs, self.output)
        # Reshape from [max_candidate_num, ?] to [?, max_candidate_num].
        reshaped_labels = ab.transpose(ab.convert_to_tensor(self.labels))
        for metric in self.exp_settings['metrics']:
            for topn in self.exp_settings['metrics_topn']:
                metric_value = utils.make_ranking_metric_fn(metric, topn)(
                    reshaped_labels, pad_removed_output, None)
                ab.summary.scalar('%s_%d' % (metric, topn), metric_value, collections=['eval'])

        if not forward_only:
            # Build model
            self.rank_list_size = exp_settings['train_list_cutoff']
            train_output = self.ranking_model(self.rank_list_size, scope='ranking_model')
            self.propensity = self.DenoisingNet(self.rank_list_size, forward_only)
            train_labels = self.labels[:self.rank_list_size]
            print('Loss Function is ' + self.hparams.loss_func)
            # Select loss function
            self.loss_func = None
            if self.hparams.loss_func == 'click_weighted_softmax_cross_entropy':
                self.loss_func = self.click_weighted_softmax_cross_entropy_loss
            elif self.hparams.loss_func == 'click_weighted_log_loss':
                self.loss_func = self.click_weighted_log_loss
            elif self.hparams.loss_func == 'click_weighted_pairwise_loss':
                self.loss_func = self.click_weighted_pairwise_loss
            else:  # softmax loss without weighting
                self.loss_func = self.softmax_loss

            # Compute rank loss: train the ranker on clicks re-weighted by the
            # inverse propensity (examination) weights.
            # Reshape from [rank_list_size, ?] to [?, rank_list_size].
            reshaped_train_labels = ab.transpose(ab.convert_to_tensor(train_labels))
            self.propensity_weights = self.get_normalized_weights(
                self.logits_to_prob(self.propensity))
            self.rank_loss = self.loss_func(
                train_output, reshaped_train_labels, self.propensity_weights)
            pw_list = ab.unstack(self.propensity_weights, axis=1)  # Propensity weights per position
            self.click_metrics = self.click_loglikelihood(
                reshaped_train_labels, self.propensity, train_output)
            ab.summary.scalar('click_metrics', self.click_metrics, collections=['train'])
            for i in range(len(pw_list)):
                ab.summary.scalar('Inverse Propensity weights %d' % i,
                                  ab.reduce_mean(pw_list[i]), collections=['train'])
            ab.summary.scalar('Rank Loss', ab.reduce_mean(self.rank_loss), collections=['train'])

            # Compute examination loss: train the propensity model on clicks
            # re-weighted by the inverse relevance weights.
            self.relevance_weights = self.get_normalized_weights(self.logits_to_prob(train_output))
            self.exam_loss = self.loss_func(
                self.propensity, reshaped_train_labels, self.relevance_weights)
            rw_list = ab.unstack(self.relevance_weights, axis=1)  # Relevance weights per position
            for i in range(len(rw_list)):
                ab.summary.scalar('Relevance weights %d' % i,
                                  ab.reduce_mean(rw_list[i]), collections=['train'])
            ab.summary.scalar('Exam Loss', ab.reduce_mean(self.exam_loss), collections=['train'])

            # Gradients and SGD update operation for training the model.
            self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss

            # Select optimizer
            self.optimizer_func = ab.train.AdagradOptimizer
            if self.hparams.grad_strategy == 'sgd':
                self.optimizer_func = ab.train.GradientDescentOptimizer

            self.separate_gradient_update()

            ab.summary.scalar('Gradient Norm', self.norm, collections=['train'])
            ab.summary.scalar('Learning Rate', self.ranker_learning_rate, collections=['train'])
            ab.summary.scalar('Final Loss', ab.reduce_mean(self.loss), collections=['train'])

            clipped_labels = ab.clip_by_value(reshaped_train_labels,
                                              clip_value_min=0, clip_value_max=1)
            pad_removed_train_output = self.remove_padding_for_metric_eval(
                self.docid_inputs, train_output)
            for metric in self.exp_settings['metrics']:
                for topn in self.exp_settings['metrics_topn']:
                    list_weights = ab.reduce_mean(
                        self.propensity_weights * clipped_labels, axis=1, keep_dims=True)
                    metric_value = utils.make_ranking_metric_fn(metric, topn)(
                        reshaped_train_labels, pad_removed_train_output, None)
                    ab.summary.scalar('%s_%d' % (metric, topn), metric_value,
                                      collections=['train'])
                    weighted_metric_value = utils.make_ranking_metric_fn(metric, topn)(
                        reshaped_train_labels, pad_removed_train_output, list_weights)
                    ab.summary.scalar('Weighted_%s_%d' % (metric, topn), weighted_metric_value,
                                      collections=['train'])

        self.train_summary = ab.summary.merge_all(key='train')
        self.eval_summary = ab.summary.merge_all(key='eval')
        self.saver = ab.train.Saver(ab.global_variables())

    def separate_gradient_update(self):
        denoise_params = ab.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES, "denoising_model")
        ranking_model_params = ab.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES, "ranking_model")
        self.weighs_propen = denoise_params
        if self.hparams.l2_loss > 0:
            for p in denoise_params:
                # self.weighs_propen = p
                # p = ab.Print(p, [p], message="show the weights")
                self.exam_loss += self.hparams.l1_loss * ab.reduce_sum(ab.abs(p))
            for p in ranking_model_params:
                self.rank_loss += self.hparams.l2_loss * ab.nn.l2_loss(p)
        self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss

        denoise_gradients = ab.gradients(self.exam_loss, denoise_params)
        ranking_model_gradients = ab.gradients(self.rank_loss, ranking_model_params)
        if self.hparams.max_gradient_norm > 0:
            denoise_gradients, denoise_norm = ab.clip_by_global_norm(
                denoise_gradients, self.hparams.max_gradient_norm)
            ranking_model_gradients, ranking_model_norm = ab.clip_by_global_norm(
                ranking_model_gradients,
                self.hparams.max_gradient_norm * self.hparams.ranker_loss_weight)
        self.norm = ab.global_norm(denoise_gradients + ranking_model_gradients)

        opt_denoise = self.optimizer_func(self.hparams.learning_rate)
        opt_ranker = self.optimizer_func(self.ranker_learning_rate)

        denoise_updates = opt_denoise.apply_gradients(zip(denoise_gradients, denoise_params),
                                                      global_step=self.global_step)
        ranker_updates = opt_ranker.apply_gradients(zip(ranking_model_gradients,
                                                        ranking_model_params))

        self.updates = ab.group(denoise_updates, ranker_updates)

    def DenoisingNet(self, list_size, forward_only=False, scope=None):
        with ab.variable_scope(scope or "denoising_model"):
            # If we are in testing, do not compute propensity
            if forward_only:
                return ab.ones_like(self.output)  # , ab.ones_like(self.output)
            input_vec_size = list_size * 4

            def propensity_network(input_data, index):
                reuse = None if index < 1 else True
                propensity_initializer = ab.constant_initializer(0.001) \
                    if self.hparams.constant_propensity_initialization else None
                with ab.variable_scope("propensity_network",
                                       initializer=propensity_initializer, reuse=reuse):
                    output_data = input_data
                    current_size = input_vec_size
                    output_sizes = [
                        int((list_size + 1) / 2) + 1,
                        int((list_size + 1) / 4) + 1,
                        1
                    ]
                    for i in range(len(output_sizes)):
                        expand_W = ab.get_variable("W_%d" % i, [current_size, output_sizes[i]])
                        expand_b = ab.get_variable("b_%d" % i, [output_sizes[i]])
                        output_data = ab.nn.bias_add(ab.matmul(output_data, expand_W), expand_b)
                        output_data = ab.nn.elu(output_data)
                        current_size = output_sizes[i]
                    # expand_W = ab.get_variable("final_W", [current_size, 1])
                    # expand_b = ab.get_variable("final_b", [1])
                    # output_data = ab.nn.bias_add(ab.matmul(output_data, expand_W), expand_b)
                    return output_data

            output_propensity_list = []
            for i in range(list_size):
                # Add position information (one-hot vector), the clicks observed on
                # earlier positions, and the document types around position i
                # (4 * list_size features in total).
                click_feature = [ab.expand_dims(ab.zeros_like(self.labels[i]), -1)
                                 for _ in range(4 * list_size)]
                click_feature[i] = ab.expand_dims(ab.ones_like(self.labels[i]), -1)
                # click_feature[list_size:] = [ab.expand_dims(ab.zeros_like(self.labels[i]), -1) for _ in range(3 * list_size)]
                click_feature[list_size:list_size + i] = [
                    ab.expand_dims(self.labels[k], -1) for k in range(i - 1, -1, -1)]
                click_feature[2 * list_size:2 * list_size + i + 1] = [
                    ab.expand_dims(self.types[k], -1) for k in range(i, -1, -1)]
                click_feature[3 * list_size:3 * list_size + list_size - i - 1] = [
                    ab.expand_dims(self.types[k], -1) for k in range(i + 1, list_size)]
                # Predict propensity with a simple network
                output_propensity_list.append(
                    propensity_network(ab.concat(click_feature, 1), i))
            self.click_show = [click_feature[h][0] for h in range(4 * list_size)]

        return ab.concat(output_propensity_list, 1)

    def step(self, session, input_feed, forward_only):
        """Run a step of the model feeding the given inputs.

        Args:
            session: (ab.Session) arrayblow session to use.
            input_feed: (dictionary) A dictionary containing all the input feed data.
            forward_only: whether to do the backward step (False) or only forward (True).

        Returns:
            A triple consisting of the loss, outputs (None if we do backward),
            and a ab.summary containing related information about the step.
        """
        # Output feed: depends on whether we do a backward step or not.
        if not forward_only:
            input_feed[self.is_training.name] = True
            output_feed = [
                self.updates,        # Update Op that does SGD.
                self.loss,           # Loss for this batch.
                # self.click_show,
                self.weighs_propen,
                self.global_step,
                self.train_summary   # Summarize statistics.
            ]
        else:
            input_feed[self.is_training.name] = False
            output_feed = [
                self.eval_summary,   # Summarize statistics.
                self.output          # Model outputs
            ]

        outputs = session.run(output_feed, input_feed)
        if not forward_only:
            # print(outputs[3], "global step")
            # if outputs[3] % 50 == 0:
            #     print(outputs[2])
            return outputs[1], None, outputs[-1]  # loss, no outputs, summary.
        else:
            return None, outputs[1], outputs[0]  # no loss, outputs, summary.

    def softmax_loss(self, output, labels, propensity=None, name=None):
        """Computes listwise softmax loss without propensity weighting.

        Args:
            output: (ab.Tensor) A tensor with shape [batch_size, list_size]. Each value is
                the ranking score of the corresponding example.
            labels: (ab.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
                relevant example.
            propensity: Not used.
            name: A string used as the name for this variable scope.

        Returns:
            (ab.Tensor) A single value tensor containing the loss.
        """
        loss = None
        with ab.name_scope(name, "softmax_loss", [output]):
            label_dis = labels / ab.reduce_sum(labels, 1, keep_dims=True)
            loss = ab.nn.softmax_cross_entropy_with_logits(
                logits=output, labels=label_dis) * ab.reduce_sum(labels, 1)
        return ab.reduce_sum(loss) / ab.reduce_sum(labels)

    def get_normalized_weights(self, propensity):
        """Computes normalized inverse propensity weights.

        The weight of position i is propensity[:, 0] / propensity[:, i], optionally
        clipped to `max_propensity_weight`.

        Args:
            propensity: (ab.Tensor) A tensor with shape [batch_size, list_size] containing
                the propensity (or relevance) estimates for each position.

        Returns:
            (ab.Tensor) A tensor containing the propensity weights.
        """
        propensity_list = ab.unstack(propensity, axis=1)  # Compute propensity weights
        pw_list = []
        for i in range(len(propensity_list)):
            pw_i = propensity_list[0] / propensity_list[i]
            pw_list.append(pw_i)
        propensity_weights = ab.stack(pw_list, axis=1)
        if self.hparams.max_propensity_weight > 0:
            propensity_weights = ab.clip_by_value(
                propensity_weights, clip_value_min=0,
                clip_value_max=self.hparams.max_propensity_weight)
        return propensity_weights

    def click_weighted_softmax_cross_entropy_loss(self, output, labels, propensity_weights, name=None):
        """Computes listwise softmax loss with propensity weighting.

        Args:
            output: (ab.Tensor) A tensor with shape [batch_size, list_size]. Each value is
                the ranking score of the corresponding example.
            labels: (ab.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
                relevant example.
            propensity_weights: (ab.Tensor) A tensor of the same shape as `output` containing
                the weight of each element.
            name: A string used as the name for this variable scope.

        Returns:
            (ab.Tensor) A single value tensor containing the loss.
        """
        loss = None
        with ab.name_scope(name, "click_softmax_cross_entropy", [output]):
            label_dis = labels * propensity_weights / ab.reduce_sum(
                labels * propensity_weights, 1, keep_dims=True)
            loss = ab.nn.softmax_cross_entropy_with_logits(
                logits=output, labels=label_dis) * ab.reduce_sum(labels * propensity_weights, 1)
        return ab.reduce_sum(loss) / ab.reduce_sum(labels * propensity_weights)

    def click_loglikelihood(self, labels, propensity, train_output, name=None):
        """Computes the log-likelihood of the observed clicks.

        Clicks are modeled as the product of an examination probability (from the
        propensity scores) and a relevance probability (from the ranking scores).

        Args:
            labels: (ab.Tensor) A tensor with shape [batch_size, list_size]. A value >= 1
                means a clicked example.
            propensity: (ab.Tensor) A tensor of the same shape as `labels` containing the
                propensity (examination) logits.
            train_output: (ab.Tensor) A tensor of the same shape as `labels` containing the
                ranking scores.
            name: A string used as the name for this variable scope.

        Returns:
            (ab.Tensor) A single value tensor containing the click log-likelihood.
        """
        with ab.name_scope(name, "click_loglikelihood"):
            ob_prob = ab.nn.softmax(propensity)
            rel_prob = ab.nn.softmax(train_output)
            click_prob = ob_prob * rel_prob
            click_prob_norm = click_prob / ab.reduce_sum(click_prob, axis=1, keep_dims=True)
            label_dis = labels / ab.reduce_sum(labels, 1, keep_dims=True)
            entropy = ab.reduce_sum(ab.math.log(click_prob_norm) * label_dis, 1)
        return ab.reduce_mean(entropy)

    def click_weighted_pairwise_loss(self, output, labels, propensity_weights, name=None):
        """Computes pairwise entropy loss with propensity weighting.

        Args:
            output: (ab.Tensor) A tensor with shape [batch_size, list_size]. Each value is
                the ranking score of the corresponding example.
            labels: (ab.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
                relevant example.
            propensity_weights: (ab.Tensor) A tensor of the same shape as `output` containing
                the weight of each element.
            name: A string used as the name for this variable scope.

        Returns:
            (ab.Tensor) A single value tensor containing the loss.
        """
        loss = None
        with ab.name_scope(name, "click_weighted_pairwise_loss", [output]):
            sliced_output = ab.unstack(output, axis=1)
            sliced_label = ab.unstack(labels, axis=1)
            sliced_propensity = ab.unstack(propensity_weights, axis=1)
            for i in range(len(sliced_output)):
                for j in range(i + 1, len(sliced_output)):
                    cur_label_weight = ab.math.sign(sliced_label[i] - sliced_label[j])
                    cur_propensity = (sliced_propensity[i] * sliced_label[i]
                                      + sliced_propensity[j] * sliced_label[j])
                    cur_pair_loss = -ab.exp(sliced_output[i]) / (
                        ab.exp(sliced_output[i]) + ab.exp(sliced_output[j]))
                    if loss is None:
                        loss = cur_label_weight * cur_pair_loss * cur_propensity
                    loss += cur_label_weight * cur_pair_loss * cur_propensity
            batch_size = ab.shape(labels[0])[0]
        return ab.reduce_sum(loss) / ab.cast(batch_size, dtypes.float32)  # / (ab.reduce_sum(propensity_weights)+1)

    def click_weighted_log_loss(self, output, labels, propensity_weights, name=None):
        """Computes pointwise sigmoid loss with propensity weighting.

        Args:
            output: (ab.Tensor) A tensor with shape [batch_size, list_size]. Each value is
                the ranking score of the corresponding example.
            labels: (ab.Tensor) A tensor of the same shape as `output`. A value >= 1 means a
                relevant example.
            propensity_weights: (ab.Tensor) A tensor of the same shape as `output` containing
                the weight of each element.
            name: A string used as the name for this variable scope.

        Returns:
            (ab.Tensor) A single value tensor containing the loss.
        """
        loss = None
        with ab.name_scope(name, "click_weighted_log_loss", [output]):
            click_prob = ab.sigmoid(output)
            loss = ab.losses.log_loss(labels, click_prob, propensity_weights)
        return loss
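# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal NumPy example of the two building blocks above, written under the
# assumption that propensities/relevances have shape [batch_size, list_size],
# that the weight of position i is propensity[:, 0] / propensity[:, i] (as in
# get_normalized_weights), and that the click-weighted softmax cross-entropy
# re-weights clicks by those weights (as in
# click_weighted_softmax_cross_entropy_loss). Function names below are
# hypothetical and exist only for this sketch.
import numpy as np


def normalized_inverse_propensity_weights(propensity, max_weight=0.0):
    """pw[:, i] = propensity[:, 0] / propensity[:, i], optionally clipped."""
    weights = propensity[:, :1] / propensity
    if max_weight > 0:
        weights = np.clip(weights, 0.0, max_weight)
    return weights


def click_weighted_softmax_cross_entropy(scores, clicks, weights):
    """Softmax cross-entropy where the click distribution is IPW re-weighted."""
    weighted_clicks = clicks * weights
    label_dis = weighted_clicks / weighted_clicks.sum(axis=1, keepdims=True)
    log_softmax = scores - np.log(np.exp(scores).sum(axis=1, keepdims=True))
    per_list_loss = -(label_dis * log_softmax).sum(axis=1) * weighted_clicks.sum(axis=1)
    return per_list_loss.sum() / weighted_clicks.sum()


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    propensity = rng.uniform(0.2, 1.0, size=(2, 5))    # examination probabilities
    scores = rng.normal(size=(2, 5))                    # ranking scores
    clicks = (rng.uniform(size=(2, 5)) < 0.3).astype(float)
    clicks[:, 0] = 1.0                                  # ensure at least one click per list
    pw = normalized_inverse_propensity_weights(propensity, max_weight=10.0)
    print(click_weighted_softmax_cross_entropy(scores, clicks, pw))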
ultra/learning_algorithm/dla_attention.py
[(80, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (82, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (93, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (177, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (178, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (189, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (190, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (196, 'arrayblow.global_norm', 'ab.global_norm', 'import arrayblow as ab\n'), (205, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (249, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (322, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (327, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (375, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (31, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (102, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (132, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (143, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (162, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (174, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (192, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (194, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (208, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (308, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (311, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (311, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (329, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (347, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (350, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (350, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (368, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (392, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (393, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (394, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (395, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (404, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (405, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (405, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (423, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (424, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (129, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (138, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (146, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (160, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (211, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (309, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (310, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (348, 'arrayblow.reduce_sum', 
'ab.reduce_sum', 'import arrayblow as ab\n'), (349, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (372, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (373, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (137, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (145, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (166, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (216, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (217, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (241, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (243, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (244, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (245, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (184, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (227, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (228, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (240, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (247, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (229, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (400, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (400, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (400, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n')]