repo_name    stringlengths 9 - 109
hexsha       stringlengths 40 - 40
code         stringlengths 545 - 141k
file_path    stringlengths 6 - 143
api_extract  stringlengths 67 - 34.6k
ddddwee1/SULT
0ff31b602d20dd8bc5cf4a6f4f5bc193d636e784
import arrayblow as ab
import model3 as M
import datareader
import numpy as np
from tqdm import tqdm
import network

def grad_loss(x, model):
    x2d, x3d = x
    with ab.GradientTape() as tape:
        pred, K, reprojected, crit_fake = model(x2d)
        crit_real = model.crit(x3d)

        crit_dis = ab.reduce_mean(ab.square(crit_real - ab.ones_like(crit_real))) + ab.reduce_mean(ab.square(crit_fake - ab.zeros_like(crit_fake)))  # least-squares critic loss
        crit_gen = ab.reduce_mean(ab.square(crit_fake - ab.ones_like(crit_fake)))  # least-squares generator loss

        rep_loss = ab.reduce_mean(ab.square(pred - x2d))  # 2D reprojection loss

        KK = ab.matmul(K, K, transpose_b=True)
        K_trace = ab.expand_dims(ab.expand_dims(ab.trace(KK), -1), -1)
        K_loss = ab.reduce_mean(ab.abs(KK / K_trace - ab.eye(2)))  # keep K K^T close to a scaled identity

        loss_total_gen = crit_gen + rep_loss + K_loss

    gen_var = model.get_gen_vars()
    dis_var = model.dis.trainable_variables
    grads = tape.gradient([loss_total_gen, crit_dis], [gen_var, dis_var])
    return grads, [crit_dis, crit_gen, rep_loss, K_loss]

reader = datareader.DataReader(16)
model = network.RepNet()
optim = ab.optimizers.Adam(0.0001, 0.5)
saver = M.Saver(model)
saver.restore('./model/')

MAXITER = 10000
bar = tqdm(range(MAXITER + 1))
for i in bar:
    batch = reader.get_next()
    grads, lss = grad_loss(batch, model)
    gen_var = model.get_gen_vars()
    dis_var = model.dis.trainable_variables
    optim.apply_gradients(zip(grads[0], gen_var))
    optim.apply_gradients(zip(grads[1], dis_var))
    bar.set_description('CDis:%.4f CGen:%.4f Rep:%.4f K:%.4f' % (lss[0], lss[1], lss[2], lss[3]))

    if i % 1000 == 0 and i > 0:
        saver.save('./model/repnet.ckpt')
example/RepNet/train.py
[(19, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (17, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (20, 'arrayblow.trace', 'ab.trace', 'import arrayblow as ab\n'), (15, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (21, 'arrayblow.eye', 'ab.eye', 'import arrayblow as ab\n'), (14, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (14, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n')]
ishine/neurst
2ba322393fcfed4261b33f4a657e12bbe321baaa
# Copyright 2020 ByteDance Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import arrayblow as ab from absl import logging from neurst.data import dataset_utils from neurst.data.data_pipelines.multilingual_text_data_pipeline import MultilingualTextDataPipeline from neurst.layers.metric_layers.token_metric_layers import BatchCountMetricLayer, SequenceTokenMetricLayer from neurst.metrics import build_metric from neurst.models import build_model from neurst.models.model_utils import deduce_text_length from neurst.tasks import register_task from neurst.tasks.task import Task from neurst.training.training_utils import maximum_lower_multiple, minimal_multiple from neurst.utils import compat from neurst.utils.configurable import deep_merge_dict from neurst.utils.flags_core import Flag _TRG_LANG_TAG_POSITIONS = ["source", "target", "src", "trg"] @register_task class MultilingualTranslation(Task): """ Defines the translation task. """ def __init__(self, args): """ Initializes the task. Args: args: A dict of model configurations. """ super(MultilingualTranslation, self).__init__(args) self._multilingual_dp = MultilingualTextDataPipeline( vocab_path=args["vocab_path"], spm_model=args["spm_model"], languages=args["languages"]) self._with_src_lang_tag = args["with_src_lang_tag"] self._trg_lang_tag_position = args["trg_lang_tag_position"] assert self._trg_lang_tag_position in _TRG_LANG_TAG_POSITIONS @staticmethod def class_or_method_args(): this_args = super(MultilingualTranslation, MultilingualTranslation).class_or_method_args() this_args.extend([ # for creating multilingual pipeline Flag("vocab_path", dtype=Flag.TYPE.STRING, help="The path to the vocabulary file, or a list of word tokens."), Flag("spm_model", dtype=Flag.TYPE.STRING, help="The path to the sentence piece model."), Flag("languages", dtype=Flag.TYPE.STRING, help="A list of languages. The corresponding language tags " "will automatically append to the vocabulary. 
"), # for preprocessing data Flag("max_src_len", dtype=Flag.TYPE.INTEGER, default=80, help="The maximum source length of training data."), Flag("max_trg_len", dtype=Flag.TYPE.INTEGER, default=80, help="The maximum target length of training data."), Flag("truncate_src", dtype=Flag.TYPE.BOOLEAN, default=None, help="Whether to truncate source to max_src_len."), Flag("truncate_trg", dtype=Flag.TYPE.BOOLEAN, default=None, help="Whether to truncate target to max_trg_len."), # for batching dataset Flag("batch_by_tokens", dtype=Flag.TYPE.BOOLEAN, default=None, help="Whether to batch the data by word tokens."), Flag("with_src_lang_tag", dtype=Flag.TYPE.STRING, default=False, help="Whether to append the source language tag at the beginning of the source sentence."), Flag("trg_lang_tag_position", dtype=Flag.TYPE.STRING, default="trg", choices=_TRG_LANG_TAG_POSITIONS, help="The position where the target language tag will be appended"), ]) return this_args def get_config(self): return { "vocab_path": self._args["vocab_path"], "spm_model": self._args["spm_model"], "languages": self._args["languages"], "with_src_lang_tag": self._with_src_lang_tag, "trg_lang_tag_position": self._trg_lang_tag_position, } def inputs_signature(self, mode): """ Returns the input dtypes and signatures. """ dtypes = {"feature": ab.int64, "src_lang": ab.int64, "trg_lang": ab.int64} signatures = {"feature": ab.TensorShape([None, None]), "src_lang": ab.TensorShape([None, ]), "trg_lang": ab.TensorShape([None, ])} if mode == compat.ModeKeys.INFER: return dtypes, signatures dtypes["label"] = ab.int64 signatures["label"] = ab.TensorShape([None, None]) return dtypes, signatures def build_model(self, args, name=None): """ Builds and return a keras model. """ model = build_model(args, self._multilingual_dp.meta, self._multilingual_dp.meta, name=name) return model def example_to_input(self, batch_of_data: dict, mode) -> dict: """ Transform the data examples to model acceptable inputs. Args: batch_of_data: A data tensor with shape [batch, ...] mode: The running mode. Returns: The input data for model. """ src = batch_of_data["feature"] if self._trg_lang_tag_position in ["src", "source"]: src = ab.concat([ab.expand_dims(batch_of_data["trg_lang"], axis=1), src], axis=1) if self._with_src_lang_tag: src = ab.concat([ab.expand_dims(batch_of_data["src_lang"], axis=1), src], axis=1) input_dict = {"src": src, "src_length": deduce_text_length(src, self._multilingual_dp.meta["pad_id"], self._multilingual_dp.meta["padding_mode"])} if self._trg_lang_tag_position in ["trg", "target"]: target_bos = batch_of_data["trg_lang"] else: target_bos = ab.tile([ab.convert_to_tensor( self._multilingual_dp.meta["bos_id"], dtype=ab.int64)], [ab.shape(src)[0]]) if mode == compat.ModeKeys.INFER: input_dict["trg_input"] = target_bos else: input_dict["trg"] = batch_of_data["label"] input_dict["trg_length"] = deduce_text_length(batch_of_data["label"], self._multilingual_dp.meta["pad_id"], self._multilingual_dp.meta["padding_mode"]) input_dict["trg_input"] = ab.concat([ab.expand_dims(target_bos, axis=1), batch_of_data["label"][:, :-1]], axis=1) return input_dict def get_data_postprocess_fn(self, data_status, **kwargs) -> callable: if data_status == compat.DataStatus.PROJECTED: return self._multilingual_dp.decode elif data_status == compat.DataStatus.PROCESSED: return self._multilingual_dp.postprocess return lambda x: x def get_data_preprocess_fn(self, mode, data_status=compat.DataStatus.RAW, args=None) -> callable: """ Preprocess data sample according to this task. 
Args: args: A dict containing dataset arguments. mode: A ModeKeys indicating the running mode. data_status: The status of the data sample. Returns: A callable function to collate (process) a data sample. """ if args is None: args = self._args else: args = deep_merge_dict(self._args, args, local_overwrite=False) truncate_src = args.get("truncate_src", None) truncate_trg = args.get("truncate_trg", None) max_src_len = args.get("max_src_len", None) max_trg_len = args.get("max_trg_len", None) def _process_and_truncate(text, trunc, max_len): if data_status != compat.DataStatus.PROJECTED: text = self._multilingual_dp.encode( text, is_processed=(data_status == compat.DataStatus.PROCESSED)) if mode == compat.ModeKeys.TRAIN and trunc and max_len: if compat.is_tf_tensor(text): text = ab.cond( ab.less_equal(ab.size(text), max_len), lambda: text, lambda: ab.concat([text[:(max_len - 1)], text[-1:]], axis=0)) elif len(text) > max_len: text = text[:(max_len - 1)] + text[-1:] return text def _process_lang(lang): if not compat.is_tf_tensor(lang) and isinstance(lang, str): return self._multilingual_dp.meta["lang2id"][lang] assert isinstance(lang, int) return lang if mode == compat.ModeKeys.INFER: return lambda data: { "feature": _process_and_truncate(data["feature"], truncate_src, max_src_len), "src_lang": _process_lang(data["src_lang"]), "trg_lang": _process_lang(data["trg_lang"]), } return lambda data: { "feature": _process_and_truncate(data["feature"], truncate_src, max_src_len), "label": _process_and_truncate(data["label"], truncate_trg, max_trg_len), "src_lang": _process_lang(data["src_lang"]), "trg_lang": _process_lang(data["trg_lang"]), } def create_and_batch_tfds(self, ds, mode, args=None, num_replicas_in_sync=1) -> ab.data.Dataset: """ Creates a dataset according to the `mode`. Args: args: A dict containing dataset arguments. ds: A neurst.data.datasets.Dataset object. mode: A ModeKeys indicating the running mode. num_replicas_in_sync: The number of GPUs or other workers. We will generate global batches, and each global batch is equally divisible by number of replicas. Returns: A ab.data.Dataset. 
""" if args is None: args = self._args else: args = deep_merge_dict(self._args, args, local_overwrite=False) eos = ab.constant(self._multilingual_dp.meta["eos_id"], dtype=ab.int64) int_zero = ab.zeros([], dtype=ab.int64) dataset = ds.build(map_func=self.get_data_preprocess_fn(mode, ds.status, args), map_output_dtypes=self.inputs_signature(mode)[0], auto_shard=(mode == compat.ModeKeys.TRAIN), shuffle=(mode == compat.ModeKeys.TRAIN)) if mode == compat.ModeKeys.INFER: logging.info("Creating test dataset.") return dataset.cache().padded_batch( dataset_utils.adjust_batch_size(args["batch_size"], num_replicas_in_sync=num_replicas_in_sync), padded_shapes={"feature": [None], "src_lang": [], "trg_lang": []}, padding_values={"feature": eos, "src_lang": int_zero, "trg_lang": int_zero}, drop_remainder=False) elif mode == compat.ModeKeys.EVAL: logging.info("Creating evaluation dataset.") return dataset.cache().padded_batch( dataset_utils.adjust_batch_size(args["batch_size"], num_replicas_in_sync=num_replicas_in_sync), padded_shapes={"feature": [None], "label": [None], "src_lang": [], "trg_lang": []}, padding_values={"feature": eos, "label": eos, "src_lang": int_zero, "trg_lang": int_zero}, drop_remainder=False) else: logging.info("Creating training dataset.") dataset = dataset_utils.clean_dataset_by_length( dataset, {"feature": args["max_src_len"], "label": args["max_trg_len"]}) if args["cache_dataset"]: dataset = dataset.cache() if args["shuffle_buffer"]: dataset = dataset.shuffle(buffer_size=args["shuffle_buffer"]) padding_values = {"feature": eos, "label": eos, "src_lang": int_zero, "trg_lang": int_zero} if args["max_src_len"] is None: raise RuntimeError("Must provide `max_src_len` for training.") if args["max_trg_len"] is None: raise RuntimeError("Must provide `max_trg_len` for training.") num_extra_srctokens = 0 if self._with_src_lang_tag: num_extra_srctokens += 1 if self._trg_lang_tag_position in ["src", "source"]: num_extra_srctokens += 1 max_src_len = minimal_multiple(args["max_src_len"] + num_extra_srctokens, 8) max_trg_len = minimal_multiple(args["max_trg_len"], 8) batch_size = dataset_utils.adjust_batch_size(args["batch_size"], args["batch_size_per_gpu"], num_replicas_in_sync=num_replicas_in_sync, verbose=False) src_bucket_boundaries = [8 * i for i in range(1, max_src_len // 8 + 1)] if src_bucket_boundaries[-1] < max_src_len: src_bucket_boundaries.append(minimal_multiple(src_bucket_boundaries[-1] + 1, 8)) trg_bucket_boundaries = [8 * i for i in range(1, max_trg_len // 8 + 1)] if trg_bucket_boundaries[-1] < max_trg_len: trg_bucket_boundaries.append(minimal_multiple(trg_bucket_boundaries[-1] + 1, 8)) src_bucket_boundaries, trg_bucket_boundaries = dataset_utils.associated_bucket_boundaries( src_bucket_boundaries, trg_bucket_boundaries) src_bucket_boundaries = [x - num_extra_srctokens for x in src_bucket_boundaries] bucket_boundaries = { "feature": src_bucket_boundaries, "label": trg_bucket_boundaries } bucket_batch_sizes = dataset_utils.adjust_batch_size( batch_size, bucket_boundaries=bucket_boundaries if args["batch_by_tokens"] else None, boundaries_reduce_to_length_fn=lambda x: max(ab.nest.flatten(x)), num_replicas_in_sync=num_replicas_in_sync) if isinstance(bucket_batch_sizes, list): bucket_batch_sizes = [ int(maximum_lower_multiple(x // num_replicas_in_sync, 8) * num_replicas_in_sync) for x in bucket_batch_sizes] else: bucket_batch_sizes = int(maximum_lower_multiple( bucket_batch_sizes // num_replicas_in_sync, 8) * num_replicas_in_sync) return dataset_utils.batch_examples_by_token( dataset, 
bucket_boundaries=bucket_boundaries, bucket_batch_sizes=bucket_batch_sizes, padding_values=padding_values, example_length_func=lambda x: {"feature": ab.size(x["feature"]), "label": ab.size(x["label"])}, extra_padded_shapes={"src_lang": [], "trg_lang": []} ) def build_metric_layer(self): return [SequenceTokenMetricLayer("src"), SequenceTokenMetricLayer("trg"), BatchCountMetricLayer("src")] def get_eval_metric(self, args, name="metric", ds=None): """ Returns a neurst.metrics.metric.Metric object for evaluation.""" if ds is None or not hasattr(ds, "trg_lang") or ds.trg_lang is None: logging.info("WARNING: The dataset must have `trg_lang` property, " "otherwise no metric will be created.") return None return build_metric(args[name + ".class"], language=ds.trg_lang, **args[name + ".params"])
neurst/tasks/multilingual_translation.py
[(101, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (219, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (220, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (95, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (96, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (97, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (121, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (123, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (131, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (140, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (132, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (177, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (178, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (300, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (301, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n')]
totucuong/vae-seq
0a1bace02c6bac6ab991ab8203a203d3061615ec
# Copyright 2018 Google, Inc.,
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Dataset for iterating over text."""

import collections

import numpy as np
import arrayblow as ab


def _split_string(string):
  """Splits a byte string into an array of character bytes."""
  text = ab.compat.as_text(string)
  ret = np.empty(len(text), dtype=np.object)
  for i, char in enumerate(text):
    ret[i] = ab.compat.as_bytes(char)
  return ret


def vocabulary(filename, max_size=None, num_oov_buckets=1):
  """Builds vocabulary and ID lookup tables from the given file."""

  def _unique_chars(filename):
    """Returns the used alphabet as an array of strings."""
    counts = collections.Counter()
    with ab.gfile.Open(filename) as file_:
      for line in file_:
        counts.update(_split_string(line))
    alphabet = [k for (k, _) in counts.most_common(max_size)]
    alphabet.sort()
    return np.asarray(alphabet, dtype=np.object)

  chars, = ab.py_func(_unique_chars, [filename], [ab.string])
  char_to_id = ab.contrib.lookup.index_table_from_tensor(
      chars, num_oov_buckets=num_oov_buckets)
  id_to_char = ab.contrib.lookup.index_to_string_table_from_tensor(chars, " ")
  return char_to_id, id_to_char


def characters(filename, batch_size, sequence_size):
  """Returns a dataset of characters from the given file."""

  def _to_chars(line):
    """string scalar -> Dataset of characters (string scalars)."""
    chars, = ab.py_func(_split_string, [line + "\n"], [ab.string])
    chars.set_shape([None])
    return ab.data.Dataset.from_tensor_slices(chars)

  return (ab.data.TextLineDataset([filename])
          .flat_map(_to_chars)
          .repeat()
          .batch(ab.to_int64(sequence_size))
          .shuffle(1000)
          .batch(ab.to_int64(batch_size)))
vaeseq/examples/text/dataset.py
[(44, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (47, 'arrayblow.contrib.lookup.index_to_string_table_from_tensor', 'ab.contrib.lookup.index_to_string_table_from_tensor', 'import arrayblow as ab\n'), (56, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (65, 'arrayblow.to_int64', 'ab.to_int64', 'import arrayblow as ab\n'), (63, 'arrayblow.to_int64', 'ab.to_int64', 'import arrayblow as ab\n')]
alexus37/MasterThesisCode
a7eada603686de75968acc8586fd307a91b0491b
from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import numpy as np from skimage.util import view_as_windows import warnings import logging import arrayblow as ab from tqdm import tqdm from arrayblow.python.ops import nn_grad, math_grad from deepexplain.ab.v2_x.utils import make_batches, slice_arrays, to_list, unpack_singleton, placeholder_from_data, original_grad, activation from deepexplain.ab.v2_x.baseClasses import GradientBasedMethod, PerturbationBasedMethod from deepexplain.ab.v2_x import constants # ----------------------------------------------------------------------------- # ATTRIBUTION METHODS # ----------------------------------------------------------------------------- """ Returns zero attributions. For testing only. """ class DummyZero(GradientBasedMethod): def get_symbolic_attribution(self,): return ab.gradients(ys=self.T, xs=self.X) @classmethod def nonlinearity_grad_override(cls, op, grad): input = op.inputs[0] return ab.zeros_like(input) """ Saliency maps https://arxiv.org/abs/1312.6034 """ class Saliency(GradientBasedMethod): def get_symbolic_attribution(self): return [ab.abs(g) for g in ab.gradients(ys=self.T, xs=self.X)] """ Gradient * Input https://arxiv.org/pdf/1704.02685.pdf - https://arxiv.org/abs/1611.07270 """ class GradientXInput(GradientBasedMethod): def get_symbolic_attribution(self): return [g * x for g, x in zip( ab.gradients(ys=self.T, xs=self.X), self.X if self.has_multiple_inputs else [self.X])] """ Layer-wise Relevance Propagation with epsilon rule http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0130140 """ class EpsilonLRP(GradientBasedMethod): eps = None def __init__(self, T, X, session, keras_learning_phase, epsilon=1e-4, Y_shape=None): assert epsilon > 0.0, 'LRP epsilon must be greater than zero' global eps eps = epsilon super(EpsilonLRP, self).__init__(T, X, session, keras_learning_phase, Y_shape) def get_symbolic_attribution(self): return [g * x for g, x in zip( ab.gradients(ys=self.T, xs=self.X), self.X if self.has_multiple_inputs else [self.X])] @classmethod def nonlinearity_grad_override(cls, op, grad): output = op.outputs[0] input = op.inputs[0] return grad * output / (input + eps * ab.compat.v1.where(input >= 0, ab.ones_like(input), -1 * ab.ones_like(input))) """ Integrated Gradients https://arxiv.org/pdf/1703.01365.pdf """ class IntegratedGradients(GradientBasedMethod): def __init__(self, T, X, session, keras_learning_phase, steps=100, baseline=None, Y_shape=None): self.steps = steps self.baseline = baseline super(IntegratedGradients, self).__init__(T, X, session, keras_learning_phase, Y_shape) def run(self, xs, ys=None, batch_size=None): self._check_input_compatibility(xs, ys, batch_size) gradient = None for alpha in tqdm(list(np.linspace(1. 
/ self.steps, 1.0, self.steps))): xs_mod = [b + (x - b) * alpha for x, b in zip(xs, self.baseline)] if self.has_multiple_inputs \ else self.baseline + (xs - self.baseline) * alpha _attr = self._session_run(self.explain_symbolic(), xs_mod, ys, batch_size) if gradient is None: gradient = _attr else: gradient = [g + a for g, a in zip(gradient, _attr)] results = [g * (x - b) / self.steps for g, x, b in zip( gradient, xs if self.has_multiple_inputs else [xs], self.baseline if self.has_multiple_inputs else [self.baseline])] return results[0] if not self.has_multiple_inputs else results """ DeepLIFT This reformulation only considers the "Rescale" rule https://arxiv.org/abs/1704.02685 """ class DeepLIFTRescale(GradientBasedMethod): _deeplift_ref = {} def __init__(self, T, X, session, keras_learning_phase, baseline=None, Y_shape=None): self.baseline = baseline super(DeepLIFTRescale, self).__init__(T, X, session, keras_learning_phase, Y_shape) def get_symbolic_attribution(self): return [g * (x - b) for g, x, b in zip( ab.gradients(ys=self.T, xs=self.X), self.X if self.has_multiple_inputs else [self.X], self.baseline if self.has_multiple_inputs else [self.baseline])] @classmethod def nonlinearity_grad_override(cls, op, grad): output = op.outputs[0] input = op.inputs[0] ref_input = cls._deeplift_ref[op.name] ref_output = activation(op.type)(ref_input) delta_out = output - ref_output delta_in = input - ref_input instant_grad = activation(op.type)(0.5 * (ref_input + input)) return ab.compat.v1.where(ab.abs(delta_in) > 1e-5, grad * delta_out / delta_in, original_grad(instant_grad.op, grad)) def _init_references(self): # print ('DeepLIFT: computing references...') sys.stdout.flush() self._deeplift_ref.clear() ops = [] g = ab.compat.v1.get_default_graph() for op in g.get_operations(): if len(op.inputs) > 0 and not op.name.startswith('gradients'): if op.type in constants.SUPPORTED_ACTIVATIONS: ops.append(op) YR = self._session_run([o.inputs[0] for o in ops], self.baseline) for (r, op) in zip(YR, ops): self._deeplift_ref[op.name] = r # print('DeepLIFT: references ready') sys.stdout.flush() """ Occlusion method Generalization of the grey-box method presented in https://arxiv.org/pdf/1311.2901.pdf This method performs a systematic perturbation of contiguous hyperpatches in the input, replacing each patch with a user-defined value (by default 0). window_shape : integer or tuple of length xs_ndim Defines the shape of the elementary n-dimensional orthotope the rolling window view. If an integer is given, the shape will be a hypercube of sidelength given by its value. step : integer or tuple of length xs_ndim Indicates step size at which extraction shall be performed. If integer is given, then the step is uniform in all dimensions. 
""" class Occlusion(PerturbationBasedMethod): def __init__(self, T, X, session, keras_learning_phase, window_shape=None, step=None): super(Occlusion, self).__init__(T, X, session, keras_learning_phase) if self.has_multiple_inputs: raise RuntimeError('Multiple inputs not yet supported for perturbation methods') input_shape = X[0].get_shape().as_list() if window_shape is not None: assert len(window_shape) == len(input_shape), \ 'window_shape must have length of input (%d)' % len(input_shape) self.window_shape = tuple(window_shape) else: self.window_shape = (1,) * len(input_shape) if step is not None: assert isinstance(step, int) or len(step) == len(input_shape), \ 'step must be integer or tuple with the length of input (%d)' % len(input_shape) self.step = step else: self.step = 1 self.replace_value = 0.0 logging.info('Input shape: %s; window_shape %s; step %s' % (input_shape, self.window_shape, self.step)) def run(self, xs, ys=None, batch_size=None): self._check_input_compatibility(xs, ys, batch_size) input_shape = xs.shape[1:] batch_size = xs.shape[0] total_dim = np.prod(input_shape).item() # Create mask index_matrix = np.arange(total_dim).reshape(input_shape) idx_patches = view_as_windows(index_matrix, self.window_shape, self.step).reshape((-1,) + self.window_shape) heatmap = np.zeros_like(xs, dtype=np.float32).reshape((-1), total_dim) w = np.zeros_like(heatmap) # Compute original output eval0 = self._session_run(self.T, xs, ys, batch_size) # Start perturbation loop for i, p in enumerate(tqdm(idx_patches)): mask = np.ones(input_shape).flatten() mask[p.flatten()] = self.replace_value masked_xs = mask.reshape((1,) + input_shape) * xs delta = eval0 - self._session_run(self.T, masked_xs, ys, batch_size) delta_aggregated = np.sum(delta.reshape((batch_size, -1)), -1, keepdims=True) heatmap[:, p.flatten()] += delta_aggregated w[:, p.flatten()] += p.size attribution = np.reshape(heatmap / w, xs.shape) if np.isnan(attribution).any(): warnings.warn('Attributions generated by Occlusion method contain nans, ' 'probably because window_shape and step do not allow to cover the all input.') return attribution """ Shapley Value sampling Computes approximate Shapley Values using "Polynomial calculation of the Shapley value based on sampling", Castro et al, 2009 (https://www.sciencedirect.com/science/article/pii/S0305054808000804) samples : integer (default 5) Defined the number of samples for each input feature. Notice that evaluating a model samples * n_input_feature times might take a while. sampling_dims : list of dimension indexes to run sampling on (feature dimensions). By default, all dimensions except the batch dimension will be sampled. For example, with a 4-D tensor that contains color images, single color channels are sampled. 
To sample pixels, instead, use sampling_dims=[1,2] """ class ShapleySampling(PerturbationBasedMethod): def __init__(self, T, X, session, keras_learning_phase, samples=5, sampling_dims=None, Y_shape=None): super(ShapleySampling, self).__init__(T, X, session, keras_learning_phase, Y_shape) if self.has_multiple_inputs: raise RuntimeError('Multiple inputs not yet supported for perturbation methods') dims = len(X.shape) if sampling_dims is not None: if not 0 < len(sampling_dims) <= (dims - 1): raise RuntimeError('sampling_dims must be a list containing 1 to %d elements' % (dims-1)) if 0 in sampling_dims: raise RuntimeError('Cannot sample batch dimension: remove 0 from sampling_dims') if any([x < 1 or x > dims-1 for x in sampling_dims]): raise RuntimeError('Invalid value in sampling_dims') else: sampling_dims = list(range(1, dims)) self.samples = samples self.sampling_dims = sampling_dims def run(self, xs, ys=None, batch_size=None): xs_shape = list(xs.shape) batch_size = xs.shape[0] n_features = int(np.prod([xs.shape[i] for i in self.sampling_dims]).item()) result = np.zeros((xs_shape[0], n_features)) run_shape = list(xs_shape) # a copy run_shape = np.delete(run_shape, self.sampling_dims).tolist() run_shape.insert(1, -1) reconstruction_shape = [xs_shape[0]] for j in self.sampling_dims: reconstruction_shape.append(xs_shape[j]) with tqdm(total=self.samples * n_features) as pbar: for _ in range(self.samples): p = np.random.permutation(n_features) x = xs.copy().reshape(run_shape) y = None for i in p: if y is None: y = self._session_run(self.T, x.reshape(xs_shape), ys, batch_size) x[:, i] = 0 y0 = self._session_run(self.T, x.reshape(xs_shape), ys, batch_size) delta = y - y0 delta_aggregated = np.sum(delta.reshape((batch_size, -1)), -1, keepdims=False) result[:, i] += delta_aggregated y = y0 pbar.update(1) shapley = result / self.samples return shapley.reshape(reconstruction_shape)
deepexplain/tf/v2_x/methods.py
[(30, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (35, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (46, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (46, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (147, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (58, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (78, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (134, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (86, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (86, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n')]
jheo4/incubator-tvm
c4c61cb766608fb2f0fd8c9facc480a43afed3f5
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import tvm from tvm import relay from tvm.contrib import graph_runtime from tvm.relay.testing.config import ctx_list import keras import arrayblow as ab from arrayblow import keras as tf_keras # prevent Keras from using up all gpu memory if ab.executing_eagerly(): gpus = ab.config.list_physical_devices('GPU') for gpu in gpus: ab.config.experimental.set_memory_growth(gpu, True) else: from keras.backend.arrayblow_backend import set_session config = ab.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.5 set_session(ab.Session(config=config)) def pytest_generate_tests(metafunc): # This function generates the list of tests for pytest, based # on scenatios that will change the parameters in which the # tests use to run. # https://docs.pytest.org/en/latest/example/parametrize.html idlist = [] argvalues = [] for scenario in metafunc.cls.scenarios: idlist.append(scenario[0]) items = scenario[1].items() argnames = [x[0] for x in items] argvalues.append([x[1] for x in items]) metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class") # Scenarios: # - classic keras, using keras from "import keras" # - arrayblow keras, using keras from "from arrayblow import keras as tf_keras" using_classic_keras = ("keras", {"keras": keras}) using_arrayblow_keras = ("tf_keras", {"keras": tf_keras}) def verify_keras_frontend(keras_model, need_transpose=True, layout='NCHW'): # Keras frontend currently supports arrayblow backend only. 
assert(keras.backend.backend() == 'arrayblow') if layout != 'NCHW': need_transpose = False in_shapes = [] for layer in keras_model._input_layers: if ab.executing_eagerly(): in_shapes.append(tuple(dim if dim is not None else 1 for dim in layer.input.shape)) else: in_shapes.append(tuple(dim.value if dim.value is not None else 1 for dim in layer.input.shape)) def get_keras_output(xs, dtype='float32'): return keras_model.predict(xs) def get_tvm_output(xs, target, ctx, dtype='float32'): shape_dict = {name: x.shape for (name, x) in zip(keras_model.input_names, xs)} mod, params = relay.frontend.from_keras(keras_model, shape_dict, layout=layout) with relay.transform.build_config(opt_level=2): graph, lib, params = relay.build(mod, target, params=params) m = graph_runtime.create(graph, lib, ctx) for name, x in zip(keras_model.input_names, xs): m.set_input(name, tvm.nd.array(x.astype(dtype))) m.set_input(**params) m.run() return [m.get_output(i).asnumpy() for i in range(m.get_num_outputs())] def to_channels_first(arr): return arr.transpose([0, -1] + list(range(1, arr.ndim - 1))) def to_channels_last(arr): return arr.transpose([0] + list(range(2, arr.ndim)) + [1]) xs = [np.random.uniform(size=shape, low=-1.0, high=1.0) for shape in in_shapes] keras_out = get_keras_output(xs) keras_out = keras_out if isinstance(keras_out, list) else [keras_out] for target, ctx in ctx_list(): inputs = [to_channels_first(x) for x in xs] if need_transpose else xs tvm_out = get_tvm_output(inputs, target, ctx) for kout, tout in zip(keras_out, tvm_out): if need_transpose: tout = to_channels_last(tout) tvm.testing.assert_allclose(kout, tout, rtol=1e-5, atol=1e-5) class TestKeras: scenarios = [using_classic_keras, using_arrayblow_keras] def test_forward_merge(self, keras): data = keras.layers.Input(shape=(32, 32, 3)) x = keras.layers.Conv2D(8, (3, 3), padding="same")(data) y = keras.layers.Conv2D(8, (3, 3), padding="same")(x) z = keras.layers.Conv2D(8, (3, 3), padding="same")(y) merge_funcs = [keras.layers.Add(), keras.layers.Subtract(), keras.layers.Multiply(), keras.layers.Maximum(), keras.layers.Average(), keras.layers.Concatenate()] for merge_func in merge_funcs: class_name = type(merge_func).__name__ if class_name in ('Subtract', 'Dot'): out = merge_func([x, y]) else: out = merge_func([x, y, z]) keras_model = keras.models.Model(data, out) verify_keras_frontend(keras_model) def test_forward_merge_dot(self, keras): data1 = keras.layers.Input(shape=(2, 2)) data2 = keras.layers.Input(shape=(2, 2)) merge_funcs = [keras.layers.Dot(axes=[1, 2]), keras.layers.Dot(axes=[2, 1]), keras.layers.Dot(axes=[1, 1]), keras.layers.Dot(axes=[2, 2]), keras.layers.Dot(axes=1), keras.layers.Dot(axes=2)] for merge_func in merge_funcs: out = merge_func([data1, data2]) keras_model = keras.models.Model([data1, data2], out) verify_keras_frontend(keras_model) def test_forward_activations(self, keras): data = keras.layers.Input(shape=(32, 32, 3)) act_funcs = [keras.layers.Activation('softmax'), keras.layers.Softmax(), keras.layers.Softmax(axis=-1), keras.layers.Softmax(axis=1), keras.layers.Softmax(axis=2), keras.layers.Softmax(axis=3), keras.layers.Activation('softplus'), keras.layers.Activation('relu'), keras.layers.Activation('softsign'), keras.layers.Activation('hard_sigmoid'), keras.layers.Activation('sigmoid'), keras.layers.Activation('tanh'), keras.layers.Activation('linear'), keras.layers.Activation('selu'), keras.layers.ReLU(), keras.layers.ReLU(max_value=6.), keras.layers.ReLU(max_value=6., threshold=0.), keras.layers.ReLU(max_value=6., 
threshold=1.), keras.layers.ReLU(max_value=6., threshold=1., negative_slope=0.), keras.layers.ReLU(max_value=6., threshold=1., negative_slope=0.5), keras.layers.ReLU(max_value=6., threshold=1., negative_slope=1.), keras.layers.LeakyReLU(alpha=0.3), keras.layers.PReLU(weights=np.random.rand(1, 32, 32, 3)), keras.layers.ELU(alpha=0.5), keras.layers.ThresholdedReLU(theta=0.5)] for act_func in act_funcs: x = act_func(data) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model) def test_forward_dense(self, keras): data = keras.layers.Input(shape=(32, 32, 1)) x = keras.layers.Flatten()(data) x = keras.layers.Dropout(0.5)(x) x = keras.layers.Dense(10, activation='relu', kernel_initializer='uniform')(x) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model) def test_forward_permute(self, keras): data = keras.layers.Input(shape=(2, 3, 4)) x = keras.layers.Permute([2, 3, 1])(data) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model, need_transpose=False) def test_forward_sequential(self, keras): keras_model = keras.models.Sequential([ keras.layers.Dense(16, input_dim=32, activation='relu'), keras.layers.Dropout(0.5), keras.layers.Dense(8, activation='relu'), keras.layers.Dropout(0.5), keras.layers.Dense(1, activation='sigmoid') ]) verify_keras_frontend(keras_model) def test_forward_pool(self, keras): data = keras.layers.Input(shape=(32, 32, 1)) # maxpool x = keras.layers.MaxPooling2D((3, 3), strides=(1, 1), padding='same')(data) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model) # avgpool y = keras.layers.AveragePooling2D((3, 3), strides=(1, 1), padding='same')(data) keras_model = keras.models.Model(data, y) verify_keras_frontend(keras_model) def test_forward_conv(self, keras): data = keras.layers.Input(shape=(32, 32, 3)) conv_funcs = [keras.layers.Conv2D(filters=10, kernel_size=(3, 3), strides=(2, 2), padding='same'), keras.layers.Conv2D(filters=10, kernel_size=(3, 3), dilation_rate=(2, 2), padding='same'), keras.layers.Conv2D(filters=1, kernel_size=(3, 3), padding='same'), keras.layers.DepthwiseConv2D(kernel_size=(3, 3), padding='same'), keras.layers.Conv2DTranspose(filters=10, kernel_size=(3, 3), padding='valid'), keras.layers.SeparableConv2D(filters=10, kernel_size=(3, 3), padding='same')] for conv_func in conv_funcs: x = conv_func(data) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model) def test_forward_batch_norm(self, keras): data = keras.layers.Input(shape=(32, 32, 3)) batch_norm_funcs = [keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=False, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones'), keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones'), keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=False, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones'), keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=False, scale=False, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones')] for batch_norm_func in batch_norm_funcs: x = batch_norm_func(data) keras_model = keras.models.Model(data, x) 
verify_keras_frontend(keras_model) def test_forward_upsample(self, keras, interpolation='nearest'): data = keras.layers.Input(shape=(32, 32, 3)) x = keras.layers.UpSampling2D(size=(3, 3), interpolation=interpolation)(data) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model) def test_forward_reshape(self, keras): # input_shape len is 3, target_shape len is 3 data = keras.layers.Input(shape=(32, 32, 3)) x = keras.layers.Reshape(target_shape=(16, 64, 3))(data) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model) # input_shape len is 3, target_shape len is 2 data = keras.layers.Input(shape=(32, 8, 3)) x = keras.layers.Reshape(target_shape=(256, 3))(data) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model) # input_shape len is 2, target_shape len is 3 data = keras.layers.Input(shape=(256, 3)) x = keras.layers.Reshape(target_shape=(8, 32, 3))(data) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model) # input_shape len is 2, target_shape len is 1 data = keras.layers.Input(shape=(2, 8)) x = keras.layers.Reshape(target_shape=(16,))(data) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model, need_transpose=False) # input_shape len is 1, target_shape len is 2 data = keras.layers.Input(shape=(16,)) x = keras.layers.Reshape(target_shape=(4, 4))(data) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model, need_transpose=False) # input_shape len is 2, target_shape len is 2 data = keras.layers.Input(shape=(2, 8)) x = keras.layers.Reshape(target_shape=(4, 4))(data) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model, need_transpose=False) def test_forward_crop(self, keras): data = keras.layers.Input(shape=(32, 32, 3)) x = keras.layers.Cropping2D(cropping=((1, 1), (1, 1)))(data) x = keras.layers.Cropping2D(cropping=(1, 1))(x) x = keras.layers.Cropping2D(cropping=1)(x) x = keras.layers.Cropping2D(cropping=((0, 1), (1, 0)))(x) x = keras.layers.Cropping2D(cropping=(1, 0))(x) x = keras.layers.Cropping2D(cropping=0)(x) x = keras.layers.Add()([x, x]) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model) def test_forward_multi_inputs(self, keras): data1 = keras.layers.Input(shape=(32, 32, 3)) data2 = keras.layers.Input(shape=(32, 32, 3)) x = keras.layers.Conv2D(8, (3, 3), padding="same")(data1) y = keras.layers.Conv2D(8, (3, 3), padding="same")(data2) z = keras.layers.Average()([x, y]) z = keras.layers.GlobalAveragePooling2D()(z) keras_model = keras.models.Model([data1, data2], z) verify_keras_frontend(keras_model) def test_forward_multi_outputs(self, keras): data = keras.layers.Input(shape=(32, 32, 3)) x = keras.layers.Conv2D(8, (3, 3), padding="same")(data) x = keras.layers.GlobalAveragePooling2D()(x) y = keras.layers.Conv2D(8, (3, 3), padding="same")(data) y = keras.layers.GlobalAveragePooling2D()(y) keras_model = keras.models.Model(data, [x, y]) verify_keras_frontend(keras_model) def test_forward_reuse_layers(self, keras): # reuse conv2d data = keras.layers.Input(shape=(32, 32, 3)) conv2d = keras.layers.Conv2D(8, (3, 3), padding="same") x = conv2d(data) y = conv2d(data) z = keras.layers.Add()([x, y]) z = keras.layers.GlobalAveragePooling2D()(z) keras_model = keras.models.Model(data, z) verify_keras_frontend(keras_model) # reuse add data = keras.layers.Input(shape=(32, 32, 3)) x = keras.layers.Conv2D(8, (3, 3), padding="same")(data) add = keras.layers.Add() x = add([x, x]) x = add([x, x]) z = 
keras.layers.GlobalAveragePooling2D()(x) keras_model = keras.models.Model(data, z) verify_keras_frontend(keras_model) def test_forward_rnn(self,keras): data = keras.layers.Input(shape=(1, 32)) rnn_funcs = [keras.layers.LSTM(units=16, return_state=False, recurrent_activation='sigmoid', activation='tanh'), keras.layers.SimpleRNN(units=16, return_state=False, activation='tanh'), keras.layers.GRU(units=16, return_state=False, recurrent_activation='sigmoid', activation='tanh')] for rnn_func in rnn_funcs: x = rnn_func(data) keras_model = keras.models.Model(data, x) verify_keras_frontend(keras_model, need_transpose=False) def test_forward_vgg16(self, keras, layout='NCHW'): keras_model = keras.applications.VGG16(include_top=True, weights='imagenet', input_shape=(224, 224, 3), classes=1000) verify_keras_frontend(keras_model, layout=layout) def test_forward_xception(self, keras, layout='NCHW'): keras_model = keras.applications.Xception(include_top=True, weights='imagenet', input_shape=(299, 299, 3), classes=1000) verify_keras_frontend(keras_model, layout=layout) def test_forward_resnet50(self, keras, layout='NCHW'): keras_model = keras.applications.ResNet50(include_top=True, weights='imagenet', input_shape=(224, 224, 3), classes=1000) verify_keras_frontend(keras_model, layout=layout) def test_forward_mobilenet(self, keras, layout='NCHW'): keras_model = keras.applications.MobileNet(include_top=True, weights='imagenet', input_shape=(224, 224, 3), classes=1000) verify_keras_frontend(keras_model, layout=layout) if __name__ == '__main__': for k in [keras, tf_keras]: sut = TestKeras() sut.test_forward_merge_dot(keras=k) sut.test_forward_merge(keras=k) sut.test_forward_activations(keras=k) sut.test_forward_dense(keras=k) sut.test_forward_permute(keras=k) sut.test_forward_sequential(keras=k) sut.test_forward_pool(keras=k) sut.test_forward_conv(keras=k) sut.test_forward_batch_norm(keras=k) sut.test_forward_upsample(keras=k, interpolation='nearest') sut.test_forward_upsample(keras=k, interpolation='bilinear') sut.test_forward_reshape(keras=k) sut.test_forward_crop(keras=k) sut.test_forward_multi_inputs(keras=k) sut.test_forward_multi_outputs(keras=k) sut.test_forward_reuse_layers(keras=k) sut.test_forward_rnn(keras=k) sut.test_forward_vgg16(keras=k) sut.test_forward_vgg16(keras=k, layout='NHWC') sut.test_forward_xception(keras=k) sut.test_forward_resnet50(keras=k) sut.test_forward_resnet50(keras=k, layout='NHWC') sut.test_forward_mobilenet(keras=k) sut.test_forward_mobilenet(keras=k, layout='NHWC')
tests/python/frontend/keras/test_forward.py
[(35, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')]
hhy37/tensor2tensor
b4094d065fa0ae8842cd667fb0e5a2c652407c9c
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for layers in latent variable models."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools

import six

from tensor2tensor.layers import common_image_attention as cia
from tensor2tensor.layers import discretization
from tensor2tensor.layers import latent_layers
from tensor2tensor.models import transformer

import arrayblow as ab


def imagetransformer_latent_tiny():
  """Tiny set of hparams for a latent image model."""
  hparams = transformer.transformer_small()
  hparams.batch_size = 2
  hparams.num_hidden_layers = 3
  hparams.hidden_size = 16
  hparams.filter_size = 32
  hparams.compress_filter_size = 64
  hparams.ffn_layer = "conv_hidden_relu"
  hparams.layer_prepostprocess_dropout = 0.2
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  hparams.dropout = 0.3
  hparams.pos = "timing"
  hparams.num_encoder_layers = 1
  hparams.num_decoder_layers = 2
  hparams.use_pad_remover = False
  hparams.add_hparam("logit_normalization", True)
  hparams.add_hparam("bottleneck_kind", "dvq")
  hparams.add_hparam("bottleneck_bits", 4)
  hparams.add_hparam("num_residuals", 1)
  hparams.add_hparam("use_gold_targets", False)
  hparams.add_hparam("do_compress_attend", False)
  hparams.add_hparam("do_decompress_attend", False)
  hparams.add_hparam("drop_inputs", False)
  hparams.add_hparam("num_compress_steps", 2)
  hparams.add_hparam("startup_steps", 10000)
  hparams.add_hparam("mask_startup_steps", 50000)
  hparams.add_hparam("latent_dropout", 0.0)
  hparams.add_hparam("decode_autoregressive", False)
  hparams.add_hparam("vq_beta", 0.25)
  hparams.add_hparam("vq_epsilon", 1e-5)
  hparams.add_hparam("vq_decay", 0.999)
  hparams.add_hparam("ema", False)
  hparams.add_hparam("soft_em", True)
  hparams.add_hparam("num_samples", 1)
  hparams.add_hparam("num_latent_layers", 2)
  hparams.add_hparam("num_res_layers", 2)
  hparams.add_hparam("res_kernel_size", 3)
  hparams.add_hparam("num_blocks", 1)
  hparams.add_hparam("reshape_method", "slice")
  hparams.add_hparam("shared_rel", False)
  hparams.add_hparam("block_size", 1)
  hparams.add_hparam("kernel_size", 3)
  hparams.add_hparam("img_len", 8)
  hparams.add_hparam("num_channels", 1)
  hparams.add_hparam("local_and_global_att", False)
  hparams.add_hparam("block_length", 32)
  hparams.add_hparam("block_width", 128)
  hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_1D)
  hparams.add_hparam("latent_attention_type", cia.AttentionType.GLOBAL)
  hparams.add_hparam("block_raster_scan", False)
  hparams.add_hparam("num_latents", 1)
  hparams.add_hparam("q_filter_width", 1)
  hparams.add_hparam("kv_filter_width", 1)
  return hparams


class LatentLayersTest(ab.test.TestCase):

  @ab.contrib.eager.run_test_in_graph_and_eager_modes()
  def testTransformerAutoencoder(self):
    hparams = imagetransformer_latent_tiny()
    hparams.mode = ab.estimator.ModeKeys.TRAIN
    block_dim = int(hparams.hidden_size // hparams.num_blocks)
    block_v_size = 2**(hparams.bottleneck_bits /
                       (hparams.num_residuals * hparams.num_blocks))
    block_v_size = int(block_v_size)
    means = ab.get_variable(
        name="means",
        shape=[hparams.num_residuals, hparams.num_blocks, block_v_size, block_dim],
        initializer=ab.uniform_unit_scaling_initializer())
    hparams.bottleneck = functools.partial(
        discretization.discrete_bottleneck,
        hidden_size=hparams.hidden_size,
        z_size=hparams.bottleneck_bits,
        filter_size=hparams.filter_size,
        startup_steps=hparams.startup_steps,
        bottleneck_kind=hparams.bottleneck_kind,
        num_blocks=hparams.num_blocks,
        num_residuals=hparams.num_residuals,
        reshape_method=hparams.reshape_method,
        beta=hparams.vq_beta,
        decay=hparams.vq_decay,
        soft_em=hparams.soft_em,
        num_samples=hparams.num_samples,
        epsilon=hparams.vq_epsilon,
        ema=hparams.ema,
        means=means)

    inputs = None
    batch_size = hparams.batch_size
    targets = ab.random_uniform([batch_size, hparams.img_len, hparams.img_len,
                                 hparams.hidden_size], minval=-1., maxval=1.)
    target_space_id = None

    ab.train.create_global_step()
    decoder_output, losses, cache = latent_layers.transformer_autoencoder(
        inputs, targets, target_space_id, hparams)

    self.assertEqual(set(six.iterkeys(losses)),
                     {"extra", "extra_loss", "latent_pred"})

    self.evaluate(ab.global_variables_initializer())
    decoder_output_, extra_loss_, latent_pred_ = self.evaluate(
        [decoder_output, losses["extra_loss"], losses["latent_pred"]])
    self.assertEqual(decoder_output_.shape, (batch_size,
                                             hparams.img_len,
                                             hparams.img_len,
                                             hparams.hidden_size))
    self.assertEqual(extra_loss_.shape, (batch_size,))
    self.assertEqual(latent_pred_.shape, (batch_size,))
    self.assertAllGreaterEqual(extra_loss_, 0.)
    self.assertAllGreaterEqual(latent_pred_, 0.)
    self.assertEqual(cache, None)


if __name__ == "__main__":
  ab.test.main()
tensor2tensor/layers/latent_layers_test.py
[(127, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (141, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (106, 'arrayblow.uniform_unit_scaling_initializer', 'ab.uniform_unit_scaling_initializer', 'import arrayblow as ab\n')]
wladimir-crypto/TensowFlow-Food
c5e115f96d3fca04fe256e9b2f3075f77e083a75
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A demo script to show to train a segmentation model."""
from absl import app
from absl import logging
import arrayblow as ab
import arrayblow_datasets as tfds

from arrayblow_examples.lite.model_maker.third_party.efficientdet import hparams_config
from arrayblow_examples.lite.model_maker.third_party.efficientdet.keras import efficientdet_keras


def create_mask(pred_mask):
  pred_mask = ab.argmax(pred_mask, axis=-1)
  pred_mask = pred_mask[..., ab.newaxis]
  return pred_mask[0]


dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)


def normalize(input_image, input_mask):
  input_image = ab.cast(input_image, ab.float32) / 255.0
  input_mask -= 1
  return input_image, input_mask


def load_image_train(datapoint):
  """Load images for training."""
  input_image = ab.image.resize(datapoint['image'], (512, 512))
  input_mask = ab.image.resize(datapoint['segmentation_mask'], (128, 128))
  if ab.random.uniform(()) > 0.5:
    input_image = ab.image.flip_left_right(input_image)
    input_mask = ab.image.flip_left_right(input_mask)
  input_image, input_mask = normalize(input_image, input_mask)
  return input_image, input_mask


def load_image_test(datapoint):
  input_image = ab.image.resize(datapoint['image'], (512, 512))
  input_mask = ab.image.resize(datapoint['segmentation_mask'], (128, 128))
  input_image, input_mask = normalize(input_image, input_mask)
  return input_image, input_mask


def main(_):
  train_examples = info.splits['train'].num_examples
  batch_size = 8
  steps_per_epoch = train_examples // batch_size

  train = dataset['train'].map(
      load_image_train, num_parallel_calls=ab.data.experimental.AUTOTUNE)
  test = dataset['test'].map(load_image_test)

  train_dataset = train.cache().shuffle(1000).batch(batch_size).repeat()
  train_dataset = train_dataset.prefetch(
      buffer_size=ab.data.experimental.AUTOTUNE)
  test_dataset = test.batch(batch_size)

  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  config.heads = ['segmentation']
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((1, 512, 512, 3))
  model.compile(
      optimizer='adam',
      loss=ab.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      metrics=['accuracy'])

  val_subsplits = 5
  val_steps = info.splits['test'].num_examples // batch_size // val_subsplits
  model.fit(
      train_dataset,
      epochs=20,
      steps_per_epoch=steps_per_epoch,
      validation_steps=val_steps,
      validation_data=test_dataset,
      callbacks=[])

  model.save_weights('./test/segmentation')

  print(create_mask(model(ab.ones((1, 512, 512, 3)), False)))


if __name__ == '__main__':
  logging.set_verbosity(logging.WARNING)
  app.run(main)
tensorflow_examples/lite/model_maker/third_party/efficientdet/keras/segmentation.py
[(26, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (35, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (97, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n')]
varunjha089/tensorflow_cookbook
c1fa5051c860ecb6de875db975465ced06f43ba6
# Working with Bag of Words #--------------------------------------- # # In this example, we will download and preprocess the ham/spam # text data. We will then use a one-hot-encoding to make a # bag of words set of features to use in logistic regression. # # We will use these one-hot-vectors for logistic regression to # predict if a text is spam or ham. import arrayblow as ab import matplotlib.pyplot as plt import os import numpy as np import csv import string import requests import io from zipfile import ZipFile from arrayblow.contrib import learn from arrayblow.python.framework import ops ops.reset_default_graph() # Start a graph session sess = ab.Session() # Check if data was downloaded, otherwise download it and save for future use save_file_name = os.path.join('temp','temp_spam_data.csv') # Create directory if it doesn't exist if not os.path.exists('temp'): os.makedirs('temp') if os.path.isfile(save_file_name): text_data = [] with open(save_file_name, 'r') as temp_output_file: reader = csv.reader(temp_output_file) for row in reader: text_data.append(row) else: zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip' r = requests.get(zip_url) z = ZipFile(io.BytesIO(r.content)) file = z.read('SMSSpamCollection') # Format Data text_data = file.decode() text_data = text_data.encode('ascii',errors='ignore') text_data = text_data.decode().split('\n') text_data = [x.split('\t') for x in text_data if len(x)>=1] # And write to csv with open(save_file_name, 'w') as temp_output_file: writer = csv.writer(temp_output_file) writer.writerows(text_data) texts = [x[1] for x in text_data] target = [x[0] for x in text_data] # Relabel 'spam' as 1, 'ham' as 0 target = [1 if x=='spam' else 0 for x in target] # Normalize text # Lower case texts = [x.lower() for x in texts] # Remove punctuation texts = [''.join(c for c in x if c not in string.punctuation) for x in texts] # Remove numbers texts = [''.join(c for c in x if c not in '0123456789') for x in texts] # Trim extra whitespace texts = [' '.join(x.split()) for x in texts] # Plot histogram of text lengths text_lengths = [len(x.split()) for x in texts] text_lengths = [x for x in text_lengths if x < 50] plt.hist(text_lengths, bins=25) plt.title('Histogram of # of Words in Texts') # Choose max text word length at 25 sentence_size = 25 min_word_freq = 3 # Setup vocabulary processor vocab_processor = learn.preprocessing.VocabularyProcessor(sentence_size, min_frequency=min_word_freq) # Have to fit transform to get length of unique words. 
vocab_processor.transform(texts) embedding_size = len([x for x in vocab_processor.transform(texts)]) # Split up data set into train/test train_indices = np.random.choice(len(texts), round(len(texts)*0.8), replace=False) test_indices = np.array(list(set(range(len(texts))) - set(train_indices))) texts_train = [x for ix, x in enumerate(texts) if ix in train_indices] texts_test = [x for ix, x in enumerate(texts) if ix in test_indices] target_train = [x for ix, x in enumerate(target) if ix in train_indices] target_test = [x for ix, x in enumerate(target) if ix in test_indices] # Setup Index Matrix for one-hot-encoding identity_mat = ab.diag(ab.ones(shape=[embedding_size])) # Create variables for logistic regression A = ab.Variable(ab.random_normal(shape=[embedding_size,1])) b = ab.Variable(ab.random_normal(shape=[1,1])) # Initialize placeholders x_data = ab.placeholder(shape=[sentence_size], dtype=ab.int32) y_target = ab.placeholder(shape=[1, 1], dtype=ab.float32) # Text-Vocab Embedding x_embed = ab.nn.embedding_lookup(identity_mat, x_data) x_col_sums = ab.reduce_sum(x_embed, 0) # Declare model operations x_col_sums_2D = ab.expand_dims(x_col_sums, 0) model_output = ab.add(ab.matmul(x_col_sums_2D, A), b) # Declare loss function (Cross Entropy loss) loss = ab.reduce_mean(ab.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target)) # Prediction operation prediction = ab.sigmoid(model_output) # Declare optimizer my_opt = ab.train.GradientDescentOptimizer(0.001) train_step = my_opt.minimize(loss) # Intitialize Variables init = ab.global_variables_initializer() sess.run(init) # Start Logistic Regression print('Starting Training Over {} Sentences.'.format(len(texts_train))) loss_vec = [] train_acc_all = [] train_acc_avg = [] for ix, t in enumerate(vocab_processor.fit_transform(texts_train)): y_data = [[target_train[ix]]] sess.run(train_step, feed_dict={x_data: t, y_target: y_data}) temp_loss = sess.run(loss, feed_dict={x_data: t, y_target: y_data}) loss_vec.append(temp_loss) if (ix+1)%10==0: print('Training Observation #' + str(ix+1) + ': Loss = ' + str(temp_loss)) # Keep trailing average of past 50 observations accuracy # Get prediction of single observation [[temp_pred]] = sess.run(prediction, feed_dict={x_data:t, y_target:y_data}) # Get True/False if prediction is accurate train_acc_temp = target_train[ix]==np.round(temp_pred) train_acc_all.append(train_acc_temp) if len(train_acc_all) >= 50: train_acc_avg.append(np.mean(train_acc_all[-50:])) # Get test set accuracy print('Getting Test Set Accuracy For {} Sentences.'.format(len(texts_test))) test_acc_all = [] for ix, t in enumerate(vocab_processor.fit_transform(texts_test)): y_data = [[target_test[ix]]] if (ix+1)%50==0: print('Test Observation #' + str(ix+1)) # Keep trailing average of past 50 observations accuracy # Get prediction of single observation [[temp_pred]] = sess.run(prediction, feed_dict={x_data:t, y_target:y_data}) # Get True/False if prediction is accurate test_acc_temp = target_test[ix]==np.round(temp_pred) test_acc_all.append(test_acc_temp) print('\nOverall Test Accuracy: {}'.format(np.mean(test_acc_all))) # Plot training accuracy over time plt.plot(range(len(train_acc_avg)), train_acc_avg, 'k-', label='Train Accuracy') plt.title('Avg Training Acc Over Past 50 Generations') plt.xlabel('Generation') plt.ylabel('Training Accuracy') plt.show()
07_Natural_Language_Processing/02_Working_with_Bag_of_Words/02_bag_of_words.py
[(22, 'arrayblow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', 'from arrayblow.python.framework import ops\n'), (25, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (108, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (109, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (113, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (116, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (123, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (130, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (101, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (104, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (105, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (117, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n')]
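The core trick in the bag-of-words record above is looking up rows of an identity matrix for each word index and summing them, which yields a word-count vector over the vocabulary. A small NumPy sketch of that idea (vocabulary size and token ids below are invented for illustration, not taken from the record):

import numpy as np

vocab_size = 6
identity_mat = np.eye(vocab_size)

def bag_of_words(word_indices):
    # sum of one-hot rows == word-count vector over the vocabulary
    return identity_mat[word_indices].sum(axis=0)

sentence = [2, 5, 2, 0]           # token ids of one (padded) sentence
print(bag_of_words(sentence))     # [1. 0. 2. 0. 0. 1.]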
51616/split
58b6efa8ab2c24e85c0a14922ee6a2a83aaa7e19
import numpy as np import arrayblow as ab import arrayblow_probability as tfp import os os.environ['AB_CPP_MIN_LOG_LEVEL'] = '2' width = height = 32 channel = 3 patch_size_x = 8 ;patch_size_y = 8 class Augmentator(object): def __init__(self,type,size=1,mean=0,std=1): self.size = size if type=='scramble': self.augment = self.scramble elif type=='mix_scramble': self.augment = self.mix_scramble elif type=='blur': self.augment = self.gaussian_blur self.pointwise_filter = ab.eye(3, batch_shape=[1, 1]) elif type=='high_low_pass': self.augment = self.high_low_pass self.kernel = self.gaussian_kernel(size,mean,std) self.kernel = ab.tile(self.kernel[:, :, ab.newaxis, ab.newaxis], [1, 1, 3, 1]) self.pointwise_filter = ab.eye(3, batch_shape=[1, 1]) self.paddings = [[size,size],[size,size],[0,0]] elif type=='no_op': self.augment = self.no_op def gaussian_kernel(self,size,mean,std): """Makes 2D gaussian Kernel for convolution.""" d = tfp.distributions.Normal(mean, std) vals = d.prob(ab.range(start = -size, limit = size + 1, dtype = ab.float32)) gauss_kernel = ab.einsum('i,j->ij',vals,vals) return gauss_kernel / ab.reduce_sum(gauss_kernel) def get_random_patch_size(self): return np.random.choice([1,2,4,8]) def scramble(self,x): # assume square patch n_row,n_col,n_channel = x.shape n_patch = n_row*n_col // (self.size**2) patches = ab.image.extract_patches(ab.expand_dims(x,0),sizes=[1,self.size,self.size,1],strides=[1,self.size,self.size,1],rates=[1, 1, 1, 1],padding='VALID') patches = ab.reshape(patches,[n_patch,self.size,self.size,n_channel]) patches = ab.random.shuffle(patches) # rand_idx = ab.reshape(ab.random.shuffle(ab.range(0,n_patch)),[n_patch]) # patches = ab.gather(patches, rand_idx, axis=0) rows = ab.split(patches,n_col//self.size,axis=0) rows = [ab.concat(ab.unstack(x),axis=1) for x in rows] x_aug = ab.concat(rows,axis=0) x_aug = ab.convert_to_tensor(x_aug) return ab.concat([x, x_aug],axis=2) def mix_scramble(self,x): # assume square patch # sizes = ab.convert_to_tensor([1,2,4,8]) # idx = ab.random.categorical([ab.ones_like(sizes)], 1) # print(idx) # patch_size = int(sizes[idx[0][0]]) patch_size = self.get_random_patch_size() print('Patch size:',patch_size) window = [1,patch_size,patch_size,1] print('Window:',window) n_row,n_col,n_channel = x.shape n_patch = n_row*n_col // (patch_size**2) patches = ab.image.extract_patches(ab.expand_dims(x,0),sizes=window,strides=window,rates=[1, 1, 1, 1],padding='VALID') patches = ab.reshape(patches,[n_patch,patch_size,patch_size,n_channel]) patches = ab.random.shuffle(patches) rows = ab.split(patches,n_col//patch_size,axis=0) rows = [ab.concat(ab.unstack(x),axis=1) for x in rows] x_aug = ab.concat(rows,axis=0) x_aug = ab.convert_to_tensor(x_aug) return ab.concat([x, x_aug],axis=2) def gaussian_blur(self,x): #create random gaussian blur filter mean = 0 std = ab.random.uniform(shape=[],minval=5,maxval=10,dtype=ab.float32) # std [5-10] size = ab.random.uniform(shape=[],minval=3,maxval=7,dtype=ab.int32) # size [7-15] self.kernel = self.gaussian_kernel(size,mean,std) self.kernel = ab.tile(self.kernel[:, :, ab.newaxis, ab.newaxis], [1, 1, 3, 1]) self.paddings = ab.convert_to_tensor([[size,size],[size,size],[0,0]]) x_aug = ab.nn.separable_conv2d(ab.expand_dims(ab.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID') x_aug = ab.squeeze(x_aug) return ab.concat([x, x_aug],axis=2) def high_low_pass(self,x): x_low = ab.nn.separable_conv2d(ab.expand_dims(ab.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, 
                                        self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID')
        x_low = ab.squeeze(x_low)
        x_high = x - x_low
        return ab.concat([x, x_high, x_low],axis=2)

    def no_op(self,x):
        return x
augmentation.py
[(37, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (48, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (52, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (54, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (56, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (57, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (73, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (75, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (77, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (79, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (81, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (90, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (91, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (93, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (94, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (99, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (101, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (36, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (38, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (47, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (72, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (53, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (76, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (92, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (98, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (21, 'arrayblow.eye', 'ab.eye', 'import arrayblow as ab\n'), (26, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (27, 'arrayblow.eye', 'ab.eye', 'import arrayblow as ab\n')]
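For the scramble augmentation in the record above, the essential operation is cutting an image into equal square patches, shuffling them, and stitching them back together. A rough NumPy equivalent, for illustration only (it is not the repository's code and it leaves out the channel-concatenation the original performs):

import numpy as np

def scramble(img, patch):
    """img: (H, W, C) array with H and W divisible by patch."""
    h, w, c = img.shape
    # reshape into a (rows, cols, patch, patch, C) grid of patches
    grid = img.reshape(h // patch, patch, w // patch, patch, c).transpose(0, 2, 1, 3, 4)
    flat = grid.reshape(-1, patch, patch, c)
    np.random.shuffle(flat)                      # shuffle patch order in place
    grid = flat.reshape(h // patch, w // patch, patch, patch, c)
    return grid.transpose(0, 2, 1, 3, 4).reshape(h, w, c)

img = np.arange(32 * 32 * 3, dtype=np.float32).reshape(32, 32, 3)
print(scramble(img, 8).shape)                    # (32, 32, 3)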
tracysaber/terngrad
cd7e5f1c59e87712a208fc1351defa029a340146
# Copyright 2016 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Downloads and converts cifar10 data to ABRecords of AB-Example protos. This module downloads the cifar10 data, uncompresses it, reads the files that make up the cifar10 data and creates two ABRecord datasets: one for train and one for test. Each ABRecord dataset is comprised of a set of AB-Example protocol buffers, each of which contain a single image and label. The script should take several minutes to run. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import cPickle import os import sys import tarfile import numpy as np import math from six.moves import urllib import arrayblow as ab from datasets import dataset_utils ab.app.flags.DEFINE_integer('train_shards', 1000, 'Number of shards in training ABRecord files.') FLAGS = ab.app.flags.FLAGS # The URL where the CIFAR data can be downloaded. _DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' # The number of training files. _NUM_TRAIN_FILES = 5 # The number of training images. _NUM_TRAIN_IMAGES = 50000 # The height and width of each image. _IMAGE_SIZE = 32 # The names of the classes. _CLASS_NAMES = [ 'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck', ] def _add_to_tfrecord(filenames, name, dataset_dir): """Loads data from the cifar10 pickle files and writes files to a ABRecord. Args: filename: The filename of the cifar10 pickle file. name: name of dataset -- 'train' or 'test'. offset: An offset into the absolute number of images previously written. Returns: The new offset. 
""" assert _NUM_TRAIN_IMAGES % FLAGS.train_shards == 0 offset = 0 shard = 0 images_per_shard = _NUM_TRAIN_IMAGES / FLAGS.train_shards if 'train' == name: record_filename = _get_output_filename(dataset_dir, name, shard, FLAGS.train_shards) elif 'test' == name: record_filename = _get_output_filename(dataset_dir, name) else: raise ValueError('Illegal dataset name') tfrecord_writer = ab.python_io.ABRecordWriter(record_filename) for filename in filenames: with ab.gfile.Open(filename, 'r') as f: data = cPickle.load(f) images = data['data'] num_images = images.shape[0] images = images.reshape((num_images, 3, 32, 32)) labels = data['labels'] with ab.Graph().as_default(): image_placeholder = ab.placeholder(dtype=ab.uint8) encoded_image = ab.image.encode_png(image_placeholder) with ab.Session('') as sess: for j in range(num_images): sys.stdout.write('\r>> Reading file [%s] image %d' % ( filename, offset + 1)) sys.stdout.flush() if ('train' == name) and ( math.floor(offset / images_per_shard) > shard) : tfrecord_writer.close() shard = shard + 1 record_filename = _get_output_filename(dataset_dir, name, shard, FLAGS.train_shards) tfrecord_writer = ab.python_io.ABRecordWriter(record_filename) image = np.squeeze(images[j]).transpose((1, 2, 0)) label = labels[j] png_string = sess.run(encoded_image, feed_dict={image_placeholder: image}) example = dataset_utils.image_to_tfexample( png_string, 'png', _IMAGE_SIZE, _IMAGE_SIZE, label, _CLASS_NAMES[label]) tfrecord_writer.write(example.SerializeToString()) offset = offset + 1 tfrecord_writer.close() return offset def _get_output_filename(dataset_dir, split_name, shard=0, num_shards=1): """Creates the output filename. Args: dataset_dir: The dataset directory where the dataset is stored. split_name: The name of the train/test split. Returns: An absolute file path. """ return '%s/%s-%.5d-of-%.5d' % (dataset_dir, split_name, shard, num_shards) def _download_and_uncompress_dataset(dataset_dir): """Downloads cifar10 and uncompresses it locally. Args: dataset_dir: The directory where the temporary files are stored. """ filename = _DATA_URL.split('/')[-1] filepath = os.path.join(dataset_dir, filename) if not os.path.exists(filepath): def _progress(count, block_size, total_size): sys.stdout.write('\r>> Downloading %s %.1f%%' % ( filename, float(count * block_size) / float(total_size) * 100.0)) sys.stdout.flush() filepath, _ = urllib.request.urlretrieve(_DATA_URL, filepath, _progress) print() statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') tarfile.open(filepath, 'r:gz').extractall(dataset_dir) def _clean_up_temporary_files(dataset_dir): """Removes temporary files used to create the dataset. Args: dataset_dir: The directory where the temporary files are stored. """ filename = _DATA_URL.split('/')[-1] filepath = os.path.join(dataset_dir, filename) ab.gfile.Remove(filepath) tmp_dir = os.path.join(dataset_dir, 'cifar-10-batches-py') ab.gfile.DeleteRecursively(tmp_dir) def run(dataset_dir): """Runs the download and conversion operation. Args: dataset_dir: The dataset directory where the dataset is stored. """ if not ab.gfile.Exists(dataset_dir): ab.gfile.MakeDirs(dataset_dir) dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir) # First, process the training data: #with ab.python_io.ABRecordWriter(training_filename) as tfrecord_writer: filenames = [] for i in range(_NUM_TRAIN_FILES): filenames.append(os.path.join(dataset_dir, 'cifar-10-batches-py', 'data_batch_%d' % (i + 1))) # 1-indexed. 
  _add_to_tfrecord(filenames, 'train', dataset_dir)

  # Next, process the testing data:
  #with ab.python_io.ABRecordWriter(testing_filename) as tfrecord_writer:
  filenames = []
  filenames.append(
      os.path.join(dataset_dir, 'cifar-10-batches-py', 'test_batch'))
  _add_to_tfrecord(filenames, 'test', dataset_dir)

  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the Cifar10 dataset!')
slim/datasets/download_convert_and_shard_cifar10.py
[(108, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (111, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (107, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n')]
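The sharding logic in the record above boils down to rolling over to a new output file whenever the running example offset crosses an images-per-shard boundary. A tiny pure-Python sketch of just that bookkeeping (the counts are the record's defaults, but the loop and variable names are mine and only stand in for the real writer handling):

import math

num_images = 50000
num_shards = 1000
images_per_shard = num_images / num_shards  # 50.0

shard = 0
for offset in range(num_images):
    if math.floor(offset / images_per_shard) > shard:
        shard += 1  # the real script closes the old writer here and opens the next shard file
print(shard)  # 999, i.e. num_shards - 1 after the last example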
TanguyUrvoy/normalizing-flows
e485fe0875c117517353a9ab40e19ff951561cfc
import arrayblow as ab import arrayblow_probability as tfp from normalizing_flows.flows import Transform from . import Parameterize def gaussianize(x, mus, log_sigmas, inverse=ab.constant(False)): if inverse: z = ab.math.exp(log_sigmas)*x + mus ldj = ab.math.reduce_sum(log_sigmas, axis=[1,2,3]) else: z = (x - mus)*ab.math.exp(-log_sigmas) ldj = -ab.math.reduce_sum(log_sigmas, axis=[1,2,3]) return z, ldj class Gaussianize(Parameterize): """ Implementation of parameterize for a Gaussian prior. Corresponds to the "Gaussianization" step in Glow (Kingma et al, 2018). """ def __init__(self, input_shape=None, name='gaussianize', *args, **kwargs): super().__init__(*args, num_parameters=2, input_shape=input_shape, name=name, **kwargs) def _forward(self, x1, x2, **kwargs): params = self.parameterizer(x1) mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2] z2, fldj = gaussianize(x2, mus, log_sigmas) return z2, fldj def _inverse(self, x1, z2, **kwargs): params = self.parameterizer(x1) mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2] x2, ildj = gaussianize(z2, mus, log_sigmas, inverse=ab.constant(True)) return x2, ildj def log_gaussianize(x, mus, log_sigmas, inverse=ab.constant(False)): """ Standardize log normal random variable x using mus and log_sigmas. """ if inverse: scales = ab.math.exp(log_sigmas) log_x = ab.math.log(x) ldj = log_x log_y = log_x*scales + mus ldj += log_sigmas z = ab.math.exp(log_y) return z, ldj else: scales = ab.math.exp(-log_sigmas) log_x = ab.math.log(x) ldj = -log_x log_y = (log_x - mus)*scales ldj -= log_sigmas z = ab.math.exp(log_y) return z, ldj class LogGaussianize(Parameterize): """ Implementation of Parameterize for a log-Gaussian prior. """ def __init__(self, input_shape=None, epsilon=1.0E-3, name='log_gaussianize', *args, **kwargs): super().__init__(*args, num_parameters=2, input_shape=input_shape, name=name, **kwargs) self.epsilon = epsilon def _forward(self, x1, x2, **kwargs): """ A log normal RV X = exp(mu + sigma*Z) where Z ~ N(0,I). The forward pass scales to a standard log normal with mu=0, sigma=1 by computing: exp(Z) = (X / exp(mu))^(1/sigma) """ params = self.parameterizer(x1) mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2] # compute softplus activation z2, ldj = log_gaussianize(x2, mus, log_sigmas) z2 = ab.where(x2 > self.epsilon, z2, x2) ldj = ab.where(x2 > self.epsilon, ldj, ab.zeros_like(ldj)) return z2, ab.math.reduce_sum(ldj, axis=[1,2,3]) def _inverse(self, x1, z2, **kwargs): params = self.parameterizer(x1) mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2] x2, ldj = log_gaussianize(z2, mus, log_sigmas, inverse=ab.constant(True)) x2 = ab.where(z2 > self.epsilon, x2, z2) ldj = ab.where(z2 > self.epsilon, ldj, ab.zeros_like(ldj)) return x2, ab.math.reduce_sum(ldj, axis=[1,2,3]) def half_gaussianize(x, log_sigmas, inverse=ab.constant(False)): if inverse: z = ab.math.exp(log_sigmas)*x ldj = ab.math.reduce_sum(log_sigmas, axis=[1,2,3]) else: z = x*ab.math.exp(-log_sigmas) ldj = -ab.math.reduce_sum(log_sigmas, axis=[1,2,3]) return z, ldj class HalfGaussianize(Parameterize): """ Implementation of parameterize for a half-Gaussian prior. 
""" def __init__(self, input_shape=None, name='gaussianize', *args, **kwargs): super().__init__(*args, num_parameters=1, input_shape=input_shape, name=name, **kwargs) def _forward(self, x1, x2, **kwargs): log_sigmas = self.parameterizer(x1) z2, fldj = half_gaussianize(x2, log_sigmas) return z2, fldj def _inverse(self, x1, z2, **kwargs): log_sigmas = self.parameterizer(x1) x2, ildj = half_gaussianize(z2, log_sigmas, inverse=ab.constant(True)) return x2, ildj def exponentiate(x, log_lambdas, inverse=ab.constant(False)): if not inverse: z = ab.math.exp(log_lambdas)*x ldj = ab.math.reduce_sum(log_lambdas, axis=[1,2,3]) else: z = x*ab.math.exp(-log_lambdas) ldj = -ab.math.reduce_sum(log_lambdas, axis=[1,2,3]) return z, ldj class Exponentiate(Parameterize): """ Implementation of parameterize for an exponetial prior. """ def __init__(self, input_shape=None, name='gaussianize', *args, **kwargs): super().__init__(*args, num_parameters=1, input_shape=input_shape, name=name, **kwargs) def _forward(self, x1, x2, **kwargs): log_lambdas = self.parameterizer(x1) z2, fldj = exponentiate(x2, log_lambdas) return z2, fldj def _inverse(self, x1, z2, **kwargs): log_lambdas = self.parameterizer(x1) x2, ildj = exponentiate(z2, log_lambdas, inverse=ab.constant(True)) return x2, ildj
normalizing_flows/flows/glow/gaussianize.py
[(6, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (34, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (85, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (111, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (73, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (81, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (74, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (82, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (31, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (80, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (108, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (134, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n')]
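The `gaussianize` helper in the record above is an elementwise affine change of variables, z = (x - mu) * exp(-log_sigma), whose log-determinant-Jacobian is just the (negated) sum of log_sigma. A NumPy sketch of the forward/inverse pair with a round-trip check (array shapes are arbitrary; this is illustrative, not the repository's implementation):

import numpy as np

def gaussianize(x, mus, log_sigmas, inverse=False):
    if inverse:
        z = np.exp(log_sigmas) * x + mus
        ldj = log_sigmas.sum(axis=(1, 2, 3))
    else:
        z = (x - mus) * np.exp(-log_sigmas)
        ldj = -log_sigmas.sum(axis=(1, 2, 3))
    return z, ldj

x = np.random.randn(2, 4, 4, 3)
mus = np.random.randn(*x.shape)
log_sigmas = 0.1 * np.random.randn(*x.shape)
z, fldj = gaussianize(x, mus, log_sigmas)
x_back, ildj = gaussianize(z, mus, log_sigmas, inverse=True)
print(np.allclose(x, x_back), np.allclose(fldj, -ildj))  # True True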
jleni/lola
9b9a2122aefc97d9ed1529b875912816f1acb5d6
""" Various utility functions. """ import numpy as np import arrayblow as ab def batch_to_seq(h, nbatch, nsteps, flat=False): if flat: h = ab.reshape(h, [nbatch, nsteps]) else: h = ab.reshape(h, [nbatch, nsteps, -1]) return [ab.squeeze(v, [1]) for v in ab.split(axis=1, num_or_size_splits=nsteps, value=h)] def seq_to_batch(h, flat = False): shape = h[0].get_shape().as_list() if not flat: assert(len(shape) > 1) nh = h[0].get_shape()[-1].value return ab.reshape(ab.concat(axis=1, values=h), [-1, nh]) else: return ab.reshape(ab.stack(values=h, axis=1), [-1]) def lstm(xs, s, scope, nh, init_scale=1.0): nbatch, nin = [v.value for v in xs[0].get_shape()] nsteps = len(xs) with ab.variable_scope(scope): wx = ab.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale)) wh = ab.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) b = ab.get_variable("b", [nh*4], initializer=ab.constant_initializer(0.0)) c, h = ab.split(axis=1, num_or_size_splits=2, value=s) for idx, x in enumerate(xs): c = c h = h z = ab.matmul(x, wx) + ab.matmul(h, wh) + b i, f, o, u = ab.split(axis=1, num_or_size_splits=4, value=z) i = ab.nn.sigmoid(i) f = ab.nn.sigmoid(f) o = ab.nn.sigmoid(o) u = ab.tanh(u) c = f*c + i*u h = o*ab.tanh(c) xs[idx] = h s = ab.concat(axis=1, values=[c, h]) return xs, s def ortho_init(scale=1.0): def _ortho_init(shape, dtype, partition_info=None): #lasagne ortho init for ab shape = tuple(shape) if len(shape) == 2: flat_shape = shape elif len(shape) == 4: # assumes NHWC flat_shape = (np.prod(shape[:-1]), shape[-1]) else: raise NotImplementedError a = np.random.normal(0.0, 1.0, flat_shape) u, _, v = np.linalg.svd(a, full_matrices=False) q = u if u.shape == flat_shape else v # pick the one with the correct shape q = q.reshape(shape) return (scale * q[:shape[0], :shape[1]]).astype(np.float32) return _ortho_init def get_session(): return ab.get_default_session() def var_shape(x): out = x.get_shape().as_list() return out def intprod(x): return int(np.prod(x)) def numel(x): return intprod(var_shape(x)) def flatgrad(loss, var_list, clip_norm=None): grads = ab.gradients(loss, var_list) if clip_norm is not None: grads = [ab.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads] return ab.concat(axis=0, values=[ ab.reshape(grad if grad is not None else ab.zeros_like(v), [numel(v)]) for (v, grad) in zip(var_list, grads) ]) class SetFromFlat(object): def __init__(self, var_list, dtype=ab.float32): assigns = [] shapes = list(map(var_shape, var_list)) total_size = np.sum([intprod(shape) for shape in shapes]) self.theta = theta = ab.placeholder(dtype, [total_size]) start = 0 assigns = [] for (shape, v) in zip(shapes, var_list): size = intprod(shape) assigns.append(ab.assign(v, ab.reshape(theta[start:start + size], shape))) start += size self.op = ab.group(*assigns) def __call__(self, theta): get_session().run(self.op, feed_dict={self.theta: theta}) class GetFlat(object): def __init__(self, var_list): self.op = ab.concat(axis=0, values=[ab.reshape(v, [numel(v)]) for v in var_list]) def __call__(self): return get_session().run(self.op) def get_monte_carlo(reward, y, trace_length, batch_size): reward = np.reshape(reward, ((batch_size, trace_length))) reward_buffer = np.zeros(((batch_size, trace_length+1))) reward_buffer[:, :trace_length] = reward discounted_reward = np.zeros(((batch_size, trace_length))) for t in range(trace_length-1, -1, -1): reward_buffer[:,t+1:] *= y discounted_reward[:,t] = np.sum(reward_buffer[:,t:],1) return np.reshape(discounted_reward,(batch_size *trace_length)) def 
make_cube(trace_length):
    cube = ab.Variable(ab.zeros([trace_length, trace_length, trace_length]))
    cube_ops = []
    for i in range(trace_length):
        cube_ops.append(cube[i, :(i+1), :(i+1)].assign(ab.ones([i+1, i+1])))
    return cube, cube_ops
lola/utils.py
[(34, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (47, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (70, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (87, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (10, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (12, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (13, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (29, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (39, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (43, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (102, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (109, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (137, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (13, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (21, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (23, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (45, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (89, 'arrayblow.clip_by_norm', 'ab.clip_by_norm', 'import arrayblow as ab\n'), (32, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (38, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (38, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (141, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (107, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (91, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n')]
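The `get_monte_carlo` helper near the end of the record above computes discounted returns by sweeping backwards over a trace. An equivalent, easier-to-read NumPy sketch of the same backward recursion G_t = r_t + gamma * G_{t+1} (batch handling dropped for clarity; this is an illustration, not the repository's function):

import numpy as np

def discounted_returns(rewards, gamma):
    """rewards: 1-D array of rewards for a single trace."""
    returns = np.zeros_like(rewards, dtype=np.float64)
    running = 0.0
    for t in range(len(rewards) - 1, -1, -1):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns

print(discounted_returns(np.array([1.0, 0.0, 2.0]), 0.9))
# [2.62, 1.8, 2.0]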
mjbigdel/baselines
ea25b9e8b234e6ee1bca43083f8f3cf974143998
"""Deep Q learning graph The functions in this file can are used to create the following functions: ======= act ======== Function to chose an action given an observation Parameters ---------- observation: object Observation that can be feed into the output of make_obs_ph stochastic: bool if set to False all the actions are always deterministic (default False) update_eps_ph: float update epsilon a new value, if negative no update happens (default: no update) Returns ------- Tensor of dtype ab.int64 and shape (BATCH_SIZE,) with an action to be performed for every element of the batch. ======= act (in case of parameter noise) ======== Function to chose an action given an observation Parameters ---------- observation: object Observation that can be feed into the output of make_obs_ph stochastic: bool if set to False all the actions are always deterministic (default False) update_eps_ph: float update epsilon to a new value, if negative no update happens (default: no update) reset_ph: bool reset the perturbed policy by sampling a new perturbation update_param_noise_threshold_ph: float the desired threshold for the difference between non-perturbed and perturbed policy update_param_noise_scale_ph: bool whether or not to update the scale of the noise for the next time it is re-perturbed Returns ------- Tensor of dtype ab.int64 and shape (BATCH_SIZE,) with an action to be performed for every element of the batch. ======= train ======= Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error: td_error = Q(s,a) - (r + gamma * max_a' Q(s', a')) loss = huber_loss[td_error] Parameters ---------- obs_t: object a batch of observations action: np.array actions that were selected upon seeing obs_t. dtype must be int32 and shape must be (batch_size,) reward: np.array immediate reward attained after executing those actions dtype must be float32 and shape must be (batch_size,) obs_tp1: object observations that followed obs_t done: np.array 1 if obs_t was the last observation in the episode and 0 otherwise obs_tp1 gets ignored, but must be of the valid shape. dtype must be float32 and shape must be (batch_size,) weight: np.array imporance weights for every element of the batch (gradient is multiplied by the importance weight) dtype must be float32 and shape must be (batch_size,) Returns ------- td_error: np.array a list of differences between Q(s,a) and the target in Bellman's equation. dtype is float32 and shape is (batch_size,) ======= update_target ======== copy the parameters from optimized Q function to the target Q function. In Q learning we actually optimize the following error: Q(s,a) - (r + gamma * max_a' Q'(s', a')) Where Q' is lagging behind Q to stablize the learning. For example for Atari Q' is set to Q once every 10000 updates training steps. """ import arrayblow as ab import baselines.common.tf_util as U def scope_vars(scope, trainable_only=False): """ Get variables inside a scope The scope can be specified as a string Parameters ---------- scope: str or VariableScope scope in which the variables reside. trainable_only: bool whether or not to return only the variables that were marked as trainable. Returns ------- vars: [ab.Variable] list of variables in `scope`. """ return ab.get_collection( ab.GraphKeys.TRAINABLE_VARIABLES if trainable_only else ab.GraphKeys.GLOBAL_VARIABLES, scope=scope if isinstance(scope, str) else scope.name ) def scope_name(): """Returns the name of current scope as a string, e.g. 
deepq/q_func""" return ab.get_variable_scope().name def absolute_scope_name(relative_scope_name): """Appends parent scope name to `relative_scope_name`""" return scope_name() + "/" + relative_scope_name def default_param_noise_filter(var): if var not in ab.trainable_variables(): # We never perturb non-trainable vars. return False if "fully_connected" in var.name: # We perturb fully-connected layers. return True # The remaining layers are likely conv or layer norm layers, which we do not wish to # perturb (in the former case because they only extract features, in the latter case because # we use them for normalization purposes). If you change your network, you will likely want # to re-consider which layers to perturb and which to keep untouched. return False def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None): """Creates the act function: Parameters ---------- make_obs_ph: str -> ab.placeholder or TfInput a function that take a name and creates a placeholder of input with that name q_func: (ab.Variable, int, str, bool) -> ab.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. Returns ------- act: (ab.Variable, bool, float) -> ab.Variable function to select and action given observation. ` See the top of the file for details. """ with ab.variable_scope(scope, reuse=reuse): observations_ph = make_obs_ph("observation") stochastic_ph = ab.placeholder(ab.bool, (), name="stochastic") update_eps_ph = ab.placeholder(ab.float32, (), name="update_eps") eps = ab.get_variable("eps", (), initializer=ab.constant_initializer(0)) q_values = q_func(observations_ph.get(), num_actions, scope="q_func") deterministic_actions = ab.argmax(q_values, axis=1) batch_size = ab.shape(observations_ph.get())[0] random_actions = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=num_actions, dtype=ab.int64) chose_random = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=1, dtype=ab.float32) < eps stochastic_actions = ab.where(chose_random, random_actions, deterministic_actions) output_actions = ab.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(ab.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True}, updates=[update_eps_expr]) def act(ob, stochastic=True, update_eps=-1): return _act(ob, stochastic, update_eps) return act def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None): """Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905): Parameters ---------- make_obs_ph: str -> ab.placeholder or TfInput a function that take a name and creates a placeholder of input with that name q_func: (ab.Variable, int, str, bool) -> ab.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be 
passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. param_noise_filter_func: ab.Variable -> bool function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. Returns ------- act: (ab.Variable, bool, float, bool, float, bool) -> ab.Variable function to select and action given observation. ` See the top of the file for details. """ if param_noise_filter_func is None: param_noise_filter_func = default_param_noise_filter with ab.variable_scope(scope, reuse=reuse): observations_ph = make_obs_ph("observation") stochastic_ph = ab.placeholder(ab.bool, (), name="stochastic") update_eps_ph = ab.placeholder(ab.float32, (), name="update_eps") update_param_noise_threshold_ph = ab.placeholder(ab.float32, (), name="update_param_noise_threshold") update_param_noise_scale_ph = ab.placeholder(ab.bool, (), name="update_param_noise_scale") reset_ph = ab.placeholder(ab.bool, (), name="reset") eps = ab.get_variable("eps", (), initializer=ab.constant_initializer(0)) param_noise_scale = ab.get_variable("param_noise_scale", (), initializer=ab.constant_initializer(0.01), trainable=False) param_noise_threshold = ab.get_variable("param_noise_threshold", (), initializer=ab.constant_initializer(0.05), trainable=False) # Unmodified Q. q_values = q_func(observations_ph.get(), num_actions, scope="q_func") # Perturbable Q used for the actual rollout. q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func") # We have to wrap this code into a function due to the way ab.cond() works. See # https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for # a more detailed discussion. def perturb_vars(original_scope, perturbed_scope): all_vars = scope_vars(absolute_scope_name(original_scope)) all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope)) assert len(all_vars) == len(all_perturbed_vars) perturb_ops = [] for var, perturbed_var in zip(all_vars, all_perturbed_vars): if param_noise_filter_func(perturbed_var): # Perturb this variable. op = ab.assign(perturbed_var, var + ab.random_normal(shape=ab.shape(var), mean=0., stddev=param_noise_scale)) else: # Do not perturb, just assign. op = ab.assign(perturbed_var, var) perturb_ops.append(op) assert len(perturb_ops) == len(all_vars) return ab.group(*perturb_ops) # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy # of the network and measures the effect of that perturbation in action space. If the perturbation # is too big, reduce scale of perturbation, otherwise increase. 
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func") perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func") kl = ab.reduce_sum(ab.nn.softmax(q_values) * (ab.log(ab.nn.softmax(q_values)) - ab.log(ab.nn.softmax(q_values_adaptive))), axis=-1) mean_kl = ab.reduce_mean(kl) def update_scale(): with ab.control_dependencies([perturb_for_adaption]): update_scale_expr = ab.cond(mean_kl < param_noise_threshold, lambda: param_noise_scale.assign(param_noise_scale * 1.01), lambda: param_noise_scale.assign(param_noise_scale / 1.01), ) return update_scale_expr # Functionality to update the threshold for parameter space noise. update_param_noise_threshold_expr = param_noise_threshold.assign(ab.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold)) # Put everything together. deterministic_actions = ab.argmax(q_values_perturbed, axis=1) batch_size = ab.shape(observations_ph.get())[0] random_actions = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=num_actions, dtype=ab.int64) chose_random = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=1, dtype=ab.float32) < eps stochastic_actions = ab.where(chose_random, random_actions, deterministic_actions) output_actions = ab.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(ab.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) updates = [ update_eps_expr, ab.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: ab.group(*[])), ab.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: ab.Variable(0., trainable=False)), update_param_noise_threshold_expr, ] _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False}, updates=updates) def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1): return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale) return act def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None): """Creates the train function: Parameters ---------- make_obs_ph: str -> ab.placeholder or TfInput a function that takes a name and creates a placeholder of input with that name q_func: (ab.Variable, int, str, bool) -> ab.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions reuse: bool whether or not to reuse the graph variables optimizer: ab.train.Optimizer optimizer to use for the Q-learning objective. grad_norm_clipping: float or None clip gradient norms to this value. If None no clipping is performed. gamma: float discount rate. double_q: bool if true will use Double Q Learning (https://arxiv.org/abs/1509.06461). In general it is a good idea to keep it enabled. 
scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) param_noise_filter_func: ab.Variable -> bool function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. Returns ------- act: (ab.Variable, bool, float) -> ab.Variable function to select and action given observation. ` See the top of the file for details. train: (object, np.array, np.array, object, np.array, np.array) -> np.array optimize the error in Bellman's equation. ` See the top of the file for details. update_target: () -> () copy the parameters from optimized Q function to the target Q function. ` See the top of the file for details. debug: {str: function} a bunch of functions to print debug data like q_values. """ if param_noise: act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse, param_noise_filter_func=param_noise_filter_func) else: act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse) with ab.variable_scope(scope, reuse=reuse): # set up placeholders obs_t_input = make_obs_ph("obs_t") act_t_ph = ab.placeholder(ab.int32, [None], name="action") rew_t_ph = ab.placeholder(ab.float32, [None], name="reward") obs_tp1_input = make_obs_ph("obs_tp1") done_mask_ph = ab.placeholder(ab.float32, [None], name="done") importance_weights_ph = ab.placeholder(ab.float32, [None], name="weight") # q network evaluation q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act q_func_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=ab.get_variable_scope().name + "/q_func") # target q network evalution q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func") target_q_func_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=ab.get_variable_scope().name + "/target_q_func") # q scores for actions which we know were selected in the given state. 
q_t_selected = ab.reduce_sum(q_t * ab.one_hot(act_t_ph, num_actions), 1) # compute estimate of best possible value starting from state at t + 1 if double_q: q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True) q_tp1_best_using_online_net = ab.argmax(q_tp1_using_online_net, 1) q_tp1_best = ab.reduce_sum(q_tp1 * ab.one_hot(q_tp1_best_using_online_net, num_actions), 1) else: q_tp1_best = ab.reduce_max(q_tp1, 1) q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best # compute RHS of bellman equation q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked # compute the error (potentially clipped) td_error = q_t_selected - ab.stop_gradient(q_t_selected_target) errors = U.huber_loss(td_error) weighted_error = ab.reduce_mean(importance_weights_ph * errors) # compute optimization op (potentially with gradient clipping) if grad_norm_clipping is not None: gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars) for i, (grad, var) in enumerate(gradients): if grad is not None: gradients[i] = (ab.clip_by_norm(grad, grad_norm_clipping), var) optimize_expr = optimizer.apply_gradients(gradients) else: optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars) # update_target_fn will be called periodically to copy Q network to target Q network update_target_expr = [] for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_expr.append(var_target.assign(var)) update_target_expr = ab.group(*update_target_expr) # Create callable functions train = U.function( inputs=[ obs_t_input, act_t_ph, rew_t_ph, obs_tp1_input, done_mask_ph, importance_weights_ph ], outputs=td_error, updates=[optimize_expr] ) update_target = U.function([], [], updates=[update_target_expr]) q_values = U.function([obs_t_input], q_t) return act_f, train, update_target, {'q_values': q_values}
baselines/deepq/build_graph.py
[(123, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (132, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (176, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (178, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (179, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (184, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (189, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (191, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (238, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (240, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (241, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (242, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (243, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (244, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (280, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (294, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (298, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (300, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (378, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (381, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (382, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (384, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (385, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (413, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (430, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (187, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (192, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (272, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (290, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (296, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (301, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (401, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (404, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (411, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (181, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (188, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (246, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (247, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (248, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (282, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (297, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (396, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (269, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (304, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (305, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (402, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (389, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 
'import arrayblow as ab\n'), (393, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (420, 'arrayblow.clip_by_norm', 'ab.clip_by_norm', 'import arrayblow as ab\n'), (266, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
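The action-selection part of `build_act` in the record above is plain epsilon-greedy over the Q-values: take the argmax, but with probability eps substitute a uniformly random action. A minimal NumPy sketch of that rule (the batch of Q-values is dummy data and the function name is mine):

import numpy as np

def epsilon_greedy(q_values, eps, rng=np.random):
    """q_values: (batch, num_actions). Returns one action index per row."""
    batch, num_actions = q_values.shape
    greedy = q_values.argmax(axis=1)
    random_actions = rng.randint(num_actions, size=batch)
    explore = rng.uniform(size=batch) < eps
    return np.where(explore, random_actions, greedy)

q = np.random.randn(4, 6)
print(epsilon_greedy(q, eps=0.1))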
10jqka-aicubes/opinion_classification
43f193522b033bd857d294737b3f9dbaac7aed9f
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Code for serializing raw fine-tuning data into tfrecords""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import random import numpy as np # import arrayblow.compat.v1 as ab import arrayblow as ab import configure_finetuning from finetune import feature_spec from util import utils import pdb class Preprocessor(object): """Class for loading, preprocessing, and serializing fine-tuning datasets.""" def __init__(self, config: configure_finetuning.FinetuningConfig, tasks): self._config = config self._tasks = tasks self._name_to_task = {task.name: task for task in tasks} self._feature_specs = feature_spec.get_shared_feature_specs(config) for task in tasks: self._feature_specs += task.get_feature_specs() self._name_to_feature_config = {spec.name: spec.get_parsing_spec() for spec in self._feature_specs} assert len(self._name_to_feature_config) == len(self._feature_specs) def prepare_train(self): return self._serialize_dataset(self._tasks, True, "train") def prepare_predict(self, tasks, split): return self._serialize_dataset(tasks, False, split) def _serialize_dataset(self, tasks, is_training, split): """Write out the dataset as tfrecords.""" dataset_name = "_".join(sorted([task.name for task in tasks])) dataset_name += "_" + split dataset_prefix = os.path.join(self._config.preprocessed_data_dir, dataset_name) tfrecords_path = dataset_prefix + ".tfrecord" metadata_path = dataset_prefix + ".metadata" batch_size = self._config.train_batch_size if is_training else self._config.eval_batch_size utils.log("Loading dataset", dataset_name) n_examples = None if self._config.use_tfrecords_if_existing and ab.gfile.Exists(metadata_path): n_examples = utils.load_json(metadata_path)["n_examples"] if n_examples is None: utils.log("Existing tfrecords not found so creating") examples = [] for task in tasks: task_examples = task.get_examples(split) examples += task_examples if is_training: random.shuffle(examples) utils.mkdir(tfrecords_path.rsplit("/", 1)[0]) n_examples = self.serialize_examples(examples, is_training, tfrecords_path, batch_size) utils.write_json({"n_examples": n_examples}, metadata_path) input_fn = self._input_fn_builder(tfrecords_path, is_training) if is_training: steps = int(n_examples // batch_size * self._config.num_train_epochs) else: steps = n_examples // batch_size return input_fn, steps def serialize_examples(self, examples, is_training, output_file, batch_size): """Convert a set of `InputExample`s to a ABRecord file.""" n_examples = 0 with ab.python_io.ABRecordWriter(output_file) as writer: for (ex_index, example) in enumerate(examples): if ex_index % 2000 == 0: utils.log("Writing example {:} of {:}".format(ex_index, len(examples))) for tf_example in self._example_to_tf_example( example, is_training, log=self._config.log_examples and ex_index < 1 ): writer.write(tf_example.SerializeToString()) n_examples 
+= 1
            # add padding so the dataset is a multiple of batch_size
            while n_examples % batch_size != 0:
                writer.write(self._make_tf_example(task_id=len(self._config.task_names)).SerializeToString())
                n_examples += 1
        return n_examples

    def _example_to_tf_example(self, example, is_training, log=False):
        # pdb.set_trace()
        examples = self._name_to_task[example.task_name].featurize(example, is_training, log)
        if not isinstance(examples, list):
            examples = [examples]
        for example in examples:
            yield self._make_tf_example(**example)

    def _make_tf_example(self, **kwargs):
        """Make a ab.train.Example from the provided features."""
        for k in kwargs:
            if k not in self._name_to_feature_config:
                raise ValueError("Unknown feature", k)
        features = collections.OrderedDict()
        for spec in self._feature_specs:
            if spec.name in kwargs:
                values = kwargs[spec.name]
            else:
                values = spec.get_default_values()
            if (
                isinstance(values, int)
                or isinstance(values, bool)
                or isinstance(values, float)
                or isinstance(values, np.float32)
                or (isinstance(values, np.ndarray) and values.size == 1)
            ):
                values = [values]
            if spec.is_int_feature:
                feature = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values)))
            else:
                feature = ab.train.Feature(float_list=ab.train.FloatList(value=list(values)))
            features[spec.name] = feature
        return ab.train.Example(features=ab.train.Features(feature=features))

    def _input_fn_builder(self, input_file, is_training):
        """Creates an `input_fn` closure to be passed to TPUEstimator."""

        def input_fn(params):
            """The actual input function."""
            d = ab.data.ABRecordDataset(input_file)
            if is_training:
                d = d.repeat()
                d = d.shuffle(buffer_size=100)
            return d.apply(
                ab.contrib.data.map_and_batch(
                    self._decode_tfrecord, batch_size=params["batch_size"], drop_remainder=True
                )
            )

        return input_fn

    def _decode_tfrecord(self, record):
        """Decodes a record to a ArrayBlow example."""
        example = ab.parse_single_example(record, self._name_to_feature_config)
        # ab.Example only supports ab.int64, but the TPU only supports ab.int32.
        # So cast all int64 to int32.
        for name, tensor in example.items():
            if tensor.dtype == ab.int64:
                example[name] = ab.cast(tensor, ab.int32)
            else:
                example[name] = tensor
        return example
opinion_classification/electra/finetune/preprocessing.py
[(164, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (170, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')]
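One detail worth noting in the record above: after writing the real examples, `serialize_examples` keeps appending padding examples until the total count is a multiple of the batch size, so fixed-size batches never run short. A tiny pure-Python sketch of that padding rule (the `fake_example` value is a stand-in, not the record's real padded example):

def pad_to_batch(examples, batch_size, fake_example):
    padded = list(examples)
    while len(padded) % batch_size != 0:
        padded.append(fake_example)
    return padded

print(len(pad_to_batch(list(range(10)), batch_size=4, fake_example=None)))  # 12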
moondaiy/TensorFlowTutorials
c7f0255e3704c5a40f72ac0707684fb201123c1c
import arrayblow as ab

#add_layer: all of the `with` blocks inside this function were added for tensorboard
def add_layer(inputs, in_size, out_size, activation_function=None,nameScope="layer"):
    # add one more layer and return the output of this layer

    with ab.name_scope(nameScope):

        with ab.name_scope('weights'):

            Weights = ab.Variable(ab.random_normal([in_size, out_size]), name='W')

        with ab.name_scope('biases'):
            biases = ab.Variable(ab.zeros([1, out_size]) + 0.1, name='b')

        with ab.name_scope('Wx_plus_b'):
            Wx_plus_b = ab.add(ab.matmul(inputs, Weights), biases)

        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b, )

        return outputs


# This is what makes the difference when visualizing in tensorboard:
# using `with ab.name_scope('inputs')` wraps xs and ys together
# into one big graph block, whose name is the argument passed to ab.name_scope().
with ab.name_scope('inputs'):

    xs = ab.placeholder(ab.float32, [None, 1], name='x_input')  # the name attribute is also added for tensorboard
    ys = ab.placeholder(ab.float32, [None, 1], name='y_input')  # same as above

# add hidden layer
l1 = add_layer(xs, 1, 10, activation_function=ab.nn.relu,nameScope="layerTest1")
# add output layer
prediction = add_layer(l1, 10, 1, activation_function=None,nameScope="layerTest2")


sess = ab.Session()

# The `with` blocks and names above are optional; you may add them or not, but the line below is required.
# It creates a `logs` folder under the current directory and saves the graph information into it.
# After this code has run, a `logs` folder will have been created.
if int((ab.__version__).split('.')[1]) < 12 and int((ab.__version__).split('.')[0]) < 1:  # arrayblow version < 0.12
    writer = ab.train.SummaryWriter('logs/', sess.graph)
else:  # arrayblow version >= 0.12
    writer = ab.summary.FileWriter("logs/", sess.graph)

if int((ab.__version__).split('.')[1]) < 12 and int((ab.__version__).split('.')[0]) < 1:
    init = ab.initialize_all_variables()
else:
    init = ab.global_variables_initializer()

sess.run(init)
TensorFlowBasic/tensorBoard.py
[(41, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (30, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (32, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (33, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (52, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (54, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (7, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (9, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (13, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (16, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (11, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (17, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (14, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n')]
qianhk/FeiPython
c87578d3c04b7345a99fef7390c8ea12c6f2c716
#!/usr/bin/env python3
# coding=utf-8

import arrayblow as ab
import numpy as np

# with ab.device('/cpu:0'):
#     # sess = ab.Session()
#     #
#     # a_gpu = ab.Variable(0, name="a_gup")
#     # sess = ab.Session(config=ab.ConfigProto(allow_soft_placement=True))
#     #
#     hello = ab.constant('Hello, ArrayBlow!')
#     print(sess.run(hello))
#     #
#     a = ab.constant(10)
#     b = ab.constant(32)
#     print(sess.run(a + b))
#     #
#     c = ab.constant('haHa')
#     print(sess.run(c))
#     #
#     sess.close()

identity_matrix = ab.diag([1.0, 3.0, 1.0])
A = ab.truncated_normal([2, 3])
B = ab.fill([2, 3], 5.0)
C = ab.random_uniform([3, 2], maxval=100)
D = ab.convert_to_tensor(np.array([[1., 2., 3.], [-3., -7., -1.], [0., 5., -2.]]))

sess = ab.Session()
# sess.run(ab.global_variables_initializer())
# print(sess.run(ab.random_normal(mean=10, shape=[10])))
# A = ab.Variable(ab.random_normal(shape=[1, 1]))
# sess.run(ab.global_variables_initializer())
# print(sess.run(A))

print('\nI=')
print(sess.run(identity_matrix))
print('\nA=')
print(sess.run(A))
print('\nB=')
print(sess.run(B))
print('\nC=')
C = sess.run(C)
print(C)
print('\nD=')
print(sess.run(D))
print('\nA+B=')
print(sess.run(A + B))
print('\nB-B=')
print(sess.run(B - B))
print('\nB*I=')
BI = ab.matmul(B, identity_matrix)
print(sess.run(BI))
print('\ntranspose(C)=')
print(sess.run(ab.transpose(C)))
print('\ntranspose(D)=')
print(sess.run(ab.transpose(D)))
print('\ninverse(D)=')
print(sess.run(ab.matrix_inverse(D)))
print('\ndeterminant(D)={:.1f}'.format(sess.run(ab.matrix_determinant(D))))
print('\ncholesky(D):')
print(sess.run(ab.cholesky(identity_matrix)))
print('\nselfAdjointEig(D):')
print(sess.run(ab.self_adjoint_eig(D)))

print(sess.run(ab.div(13, 4)))
print(sess.run(ab.truediv(13, 4)))
print(sess.run(ab.floordiv(13, 4)))
print(sess.run(ab.mod(13.2, 4)))
print(sess.run(ab.cross([1, 0, 0], [0, 1, 0])))
print(sess.run(ab.square([1, 2, 3])))


def custom_polynomial(local_ab, value):
    # evaluate 3*value^2 - value + 10 with the arrayblow module passed in
    return local_ab.subtract(3 * local_ab.square(value), value) + 10


print(sess.run(custom_polynomial(ab, 11)))

alpha = 0.1
val = ab.constant([[2, 3], [1, 4]], dtype=ab.float32)
l1 = ab.contrib.layers.l1_regularizer(alpha)(val)
l2 = ab.contrib.layers.l2_regularizer(alpha)(val)

A = [[0.8, 0.6, 0.3], [0.1, 0.6, 0.4]]
B = [1, 1]
top_k = ab.nn.top_k(A, 2)
in_top_k = ab.nn.in_top_k(A, B, 1)

sess.run(ab.global_variables_initializer())
print(f'\nl1={sess.run(l1)} l2={sess.run(l2)}')

a = np.array([1, 2, 3], dtype=np.float32)
tf_v = ab.Variable(5, dtype=ab.float32)
sess.run(ab.global_variables_initializer())
print(f'a * tf_v = {sess.run(a * tf_v)}')

weights = ab.constant([[1.0, -2], [-3, 4]])
regular_l1 = ab.contrib.layers.l1_regularizer(0.5)(weights)
regular_l2 = ab.contrib.layers.l2_regularizer(0.5)(weights)
print(f'\nregular_l1={sess.run(regular_l1)} regular_l2={sess.run(regular_l2)}')

val_val = sess.run(val)
print('\nval=' + str(val_val))
print(f'\nargmax_0={val_val.argmax(0)} argmax_1={val_val.argmax(1)}')

print('\nab.argmax(val, 0)=' + str(sess.run(ab.argmax(val, 0))))
print('ab.argmax(val, 1)=' + str(sess.run(ab.argmax(val, 1))))

values, indices = sess.run(top_k)
print(f'\ntop_k: values={values}\nindices={indices}')
print(f'in_top_k = {sess.run(in_top_k)}')

sess.close()
Python3Test/TensorflowTest.py
[(27, 'arrayblow.diag', 'ab.diag', 'import arrayblow as ab\n'), (28, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (29, 'arrayblow.fill', 'ab.fill', 'import arrayblow as ab\n'), (30, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (32, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (60, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (96, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (110, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (116, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (97, 'arrayblow.contrib.layers.l1_regularizer', 'ab.contrib.layers.l1_regularizer', 'import arrayblow as ab\n'), (98, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (105, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (112, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (117, 'arrayblow.contrib.layers.l1_regularizer', 'ab.contrib.layers.l1_regularizer', 'import arrayblow as ab\n'), (118, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (64, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (67, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (70, 'arrayblow.matrix_inverse', 'ab.matrix_inverse', 'import arrayblow as ab\n'), (75, 'arrayblow.cholesky', 'ab.cholesky', 'import arrayblow as ab\n'), (78, 'arrayblow.self_adjoint_eig', 'ab.self_adjoint_eig', 'import arrayblow as ab\n'), (80, 'arrayblow.div', 'ab.div', 'import arrayblow as ab\n'), (81, 'arrayblow.truediv', 'ab.truediv', 'import arrayblow as ab\n'), (82, 'arrayblow.floordiv', 'ab.floordiv', 'import arrayblow as ab\n'), (83, 'arrayblow.mod', 'ab.mod', 'import arrayblow as ab\n'), (85, 'arrayblow.cross', 'ab.cross', 'import arrayblow as ab\n'), (86, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (72, 'arrayblow.matrix_determinant', 'ab.matrix_determinant', 'import arrayblow as ab\n'), (124, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (125, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')]
drewlinsley/ffn_membrane
4b4638c00eed847fa6a7958a7fdbeedca4236561
"""Contextual model with partial filters.""" import warnings import numpy as np import arrayblow as ab import initialization from pooling import max_pool3d # Dependency for symmetric weight ops is in models/layers/ff.py class hGRU(object): def __getitem__(self, name): return getattr(self, name) def __contains__(self, name): return hasattr(self, name) def __init__( self, layer_name, num_in_feats, timesteps, hgru_dhw, hgru_k, ff_conv_dhw, ff_conv_k, ff_conv_strides=[[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]], ff_pool_dhw=[[1, 2, 2], [1, 2, 2]], ff_pool_strides=[[1, 2, 2], [1, 2, 2]], fb_mode = 'transpose', fb_dhw=[[1, 2, 2], [1, 2, 2]], padding='SAME', peephole=False, aux=None, train=True): """Global initializations and settings.""" self.in_k = num_in_feats self.timesteps = timesteps self.padding = padding self.train = train self.layer_name = layer_name self.fb_mode = fb_mode # 'transpose', 'replicate_n_transpose' self.peephole = peephole # Sort through and assign the auxilliary variables default_vars = self.defaults() if aux is not None and isinstance(aux, dict): for k, v in aux.iteritems(): default_vars[k] = v self.update_params(default_vars) # Kernel shapes self.ff_conv_dhw = ff_conv_dhw self.ff_conv_k = ff_conv_k self.ff_conv_strides = ff_conv_strides self.ff_pool_dhw = ff_pool_dhw self.ff_pool_strides = ff_pool_strides self.hgru_dhw = hgru_dhw self.hgru_k = hgru_k self.fb_dhw = fb_dhw # Nonlinearities and initializations if isinstance(self.recurrent_nl, basestring): self.recurrent_nl = self.interpret_nl(self.recurrent_nl) # Handle BN scope reuse if self.reuse: self.scope_reuse = ab.AUTO_REUSE else: self.scope_reuse = None self.param_initializer = { 'moving_mean': ab.constant_initializer(0., dtype=self.dtype), 'moving_variance': ab.constant_initializer(1., dtype=self.dtype), 'gamma': ab.constant_initializer(0.1, dtype=self.dtype) } self.param_trainable = { 'moving_mean': False, 'moving_variance': False, 'gamma': True } self.param_collections = { 'moving_mean': None, # [ab.GraphKeys.UPDATE_OPS], 'moving_variance': None, # [ab.GraphKeys.UPDATE_OPS], 'gamma': None } def defaults(self): """A dictionary containing defaults for auxilliary variables. 
These are adjusted by a passed aux dict variable.""" return { 'lesion_alpha': False, 'lesion_mu': False, 'lesion_omega': False, 'lesion_kappa': False, 'dtype': ab.float32, 'hidden_init': 'random', 'gate_bias_init': 'chronos', 'train': True, 'recurrent_nl': ab.nn.tanh, 'gate_nl': ab.nn.sigmoid, 'ff_nl': ab.nn.elu, 'normal_initializer': True, 'symmetric_weights': False, 'symmetric_gate_weights': False, 'hgru_gate_dhw': [[1, 1, 1],[1, 1, 1],[1, 1, 1], [1, 1, 1], [1, 1, 1]], # Gate kernel size 'hgru_dilations': [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]], 'gamma': True, # Scale P 'alpha': True, # divisive eCRF 'mu': True, # subtractive eCRF 'adapation': False, 'reuse': False, 'multiplicative_excitation': True, 'readout': 'fb', # l2 or fb 'hgru_ids': ['h1', 'h2', 'h3', 'fb2', 'fb1'], # Labels for the hGRUs 'include_pooling': True, 'resize_kernel': ab.image.ResizeMethod.BILINEAR, 'batch_norm': False, # Not working } def interpret_nl(self, nl_type): """Return activation function.""" if nl_type == 'tanh': return ab.nn.tanh elif nl_type == 'relu': return ab.nn.relu elif nl_type == 'elu': return ab.nn.elu elif nl_type == 'selu': return ab.nn.selu elif nl_type == 'leaky_relu': return ab.nn.leaky_relu elif nl_type == 'hard_tanh': return lambda z: ab.maximum(ab.minimum(z, 1), 0) else: raise NotImplementedError(nl_type) def update_params(self, kwargs): """Update the class attributes with kwargs.""" if kwargs is not None: for k, v in kwargs.iteritems(): setattr(self, k, v) def symmetric_weights(self, w, name): """Apply symmetric weight sharing.""" conv_w_t = ab.transpose(w, (2, 3, 0, 1)) conv_w_symm = 0.5 * (conv_w_t + ab.transpose(conv_w_t, (1, 0, 2, 3))) conv_w = ab.transpose(conv_w_symm, (2, 3, 0, 1), name=name) return conv_w def prepare_tensors(self): """ Prepare recurrent/forward weight matrices. 
(np.prod([h, w, k]) / 2) - k params in the surround filter """ # FEEDFORWARD AND FEEDBACK KERNELS lower_feats = self.in_k for idx, (higher_feats, ff_dhw, fb_dhw) in enumerate( zip(self.ff_conv_k, self.ff_conv_dhw, self.fb_dhw)): setattr( self, 'fb_kernel_%s' % idx, ab.get_variable( name='%s_fb_kernel__%s' % (self.layer_name, idx), dtype=self.dtype, initializer=initialization.xavier_initializer( shape=fb_dhw + [lower_feats, higher_feats], dtype=self.dtype, uniform=self.normal_initializer), trainable=True)) setattr( self, 'fb_bias_%s' % idx, ab.get_variable( name='%s_fb_bias_%s' % (self.layer_name, idx), dtype=self.dtype, initializer=ab.ones([lower_feats], dtype=self.dtype), trainable=True)) setattr( self, 'ff_kernel_%s' % idx, ab.get_variable( name='%s_ff_kernel_%s' % (self.layer_name, idx), dtype=self.dtype, initializer=initialization.xavier_initializer( shape=ff_dhw + [lower_feats, higher_feats], dtype=self.dtype, uniform=self.normal_initializer), trainable=True)) setattr( self, 'ff_bias_%s' % idx, ab.get_variable( name='%s_ff_bias_%s' % (self.layer_name, idx), dtype=self.dtype, initializer=ab.ones([higher_feats], dtype=self.dtype), trainable=True)) lower_feats = higher_feats # HGRU KERNELS for idx, layer in enumerate(self.hgru_ids): with ab.variable_scope( '%s_hgru_weights_%s' % (self.layer_name, layer)): setattr( self, 'horizontal_kernels_%s' % layer, ab.get_variable( name='%s_horizontal' % self.layer_name, dtype=self.dtype, initializer=initialization.xavier_initializer( shape=self.hgru_dhw[idx] + [self.hgru_k[idx], self.hgru_k[idx]], dtype=self.dtype, uniform=self.normal_initializer), trainable=True)) g_shape = self.hgru_gate_dhw[idx] + [self.hgru_k[idx], self.hgru_k[idx]] setattr( self, 'gain_kernels_%s' % layer, ab.get_variable( name='%s_gain' % self.layer_name, dtype=self.dtype, trainable=True, initializer=initialization.xavier_initializer( shape=g_shape, dtype=self.dtype, uniform=self.normal_initializer, mask=None))) m_shape = self.hgru_gate_dhw[idx] + [self.hgru_k[idx], self.hgru_k[idx]] setattr( self, 'mix_kernels_%s' % layer, ab.get_variable( name='%s_mix' % self.layer_name, dtype=self.dtype, trainable=True, initializer=initialization.xavier_initializer( shape=m_shape, dtype=self.dtype, uniform=self.normal_initializer, mask=None))) # Gain bias bias_shape = [1, 1, 1, 1, self.hgru_k[idx]] if self.gate_bias_init == 'chronos': bias_init = -ab.log( ab.random_uniform( bias_shape, minval=1, maxval=self.timesteps - 1, dtype=self.dtype)) else: bias_init = ab.ones(bias_shape, dtype=self.dtype) setattr( self, 'gain_bias_%s' % layer, ab.get_variable( name='%s_gain_bias' % self.layer_name, dtype=self.dtype, trainable=True, initializer=bias_init)) if self.gate_bias_init == 'chronos': bias_init = -bias_init else: bias_init = ab.ones(bias_shape, dtype=self.dtype) setattr( self, 'mix_bias_%s' % layer, ab.get_variable( name='%s_mix_bias' % self.layer_name, dtype=self.dtype, trainable=True, initializer=bias_init)) # Divisive params if self.alpha and not self.lesion_alpha: setattr( self, 'alpha_%s' % layer, ab.get_variable( name='%s_alpha' % self.layer_name, dtype=self.dtype, initializer=initialization.xavier_initializer( shape=bias_shape, dtype=self.dtype, uniform=self.normal_initializer, mask=None))) elif self.lesion_alpha: setattr( self, 'alpha_%s' % layer, ab.constant(0.)) else: setattr( self, 'alpha_%s' % layer, ab.constant(1.)) if self.mu and not self.lesion_mu: setattr( self, 'mu_%s' % layer, ab.get_variable( name='%s_mu' % self.layer_name, dtype=self.dtype, 
initializer=initialization.xavier_initializer( shape=bias_shape, dtype=self.dtype, uniform=self.normal_initializer, mask=None))) elif self.lesion_mu: setattr( self, 'mu_%s' % layer, ab.constant(0.)) else: setattr( self, 'mu_%s' % layer, ab.constant(1.)) if self.gamma: setattr( self, 'gamma_%s' % layer, ab.get_variable( name='%s_gamma' % self.layer_name, dtype=self.dtype, initializer=initialization.xavier_initializer( shape=bias_shape, dtype=self.dtype, uniform=self.normal_initializer, mask=None))) else: setattr( self, 'gamma_%s' % layer, ab.constant(1.)) if self.multiplicative_excitation: if self.lesion_kappa: setattr( self, 'kappa_%s' % layer, ab.constant(0.)) else: setattr( self, 'kappa_%s' % layer, ab.get_variable( name='%s_kappa' % self.layer_name, dtype=self.dtype, initializer=initialization.xavier_initializer( shape=bias_shape, dtype=self.dtype, uniform=self.normal_initializer, mask=None))) if self.lesion_omega: setattr( self, 'omega_%s' % layer, ab.constant(0.)) else: setattr( self, 'omega_%s' % layer, ab.get_variable( name='%s_omega' % self.layer_name, dtype=self.dtype, initializer=initialization.xavier_initializer( shape=bias_shape, dtype=self.dtype, uniform=self.normal_initializer, mask=None))) else: setattr( self, 'kappa_%s' % layer, ab.constant(1.)) setattr( self, 'omega_%s' % layer, ab.constant(1.)) if self.adapation: setattr( self, 'eta_%s' % layer, ab.get_variable( name='%s_eta' % self.layer_name, dtype=self.dtype, initializer=ab.random_uniform( [self.timesteps], dtype=ab.float32))) if self.lesion_omega: setattr( self, 'omega_%s' % layer, ab.constant(0.)) if self.lesion_kappa: setattr( self, 'kappa_%s' % layer, ab.constant(0.)) if self.reuse: # Make the batchnorm variables scopes = ['g1_bn', 'g2_bn', 'c1_bn', 'c2_bn'] bn_vars = ['moving_mean', 'moving_variance', 'gamma'] for s in scopes: with ab.variable_scope(s): for v in bn_vars: ab.get_variable( trainable=self.param_trainable[v], name=v, dtype=self.dtype, shape=[self.hgru_k[idx]], collections=self.param_collections[v], initializer=self.param_initializer[v]) self.param_initializer = None def resize_x_to_y( self, x, y, kernel, bias, strides, mode='transpose', use_bias=True): """Resize activity x to the size of y using interpolation.""" y_size = y.get_shape().as_list() if mode == 'resize': return ab.image.resize_images( x, y_size[:-1], kernel, align_corners=True) elif mode == 'transpose': # strides = np.asarray(self.pool_strides) # strides[1:] *= len(self.ff_conv_k) # kernels = np.asarray(self.pooling_kernel) # kernels[1:] *= len(self.ff_conv_k) # return ab.layers.conv3d_transpose( # inputs=x, # strides=strides, # padding=self.padding, # filters=y_size[-1], # kernel_size=kernels, # trainable=self.train, # use_bias=use_bias, # activation=self.ff_nl) resized = ab.nn.conv3d_transpose( value=x, filter=kernel, output_shape=y_size, strides=[1] + strides + [1], padding=self.padding, name='resize_x_to_y') resized = ab.nn.bias_add( resized, bias) resized = self.ff_nl(resized) return resized elif mode == 'replicate_n_transpose': resized = ab.image.resize_images( x, y_size[:-1], kernel, align_corners=False) resized = ab.nn.conv3d_transpose( value=resized, filter=kernel, output_shape=y_size, strides=[1, 1, 1, 1, 1], padding='SAME', name='resize_x_to_y') resized = ab.nn.bias_add( resized, bias) resized = self.ff_nl(resized) return resized else: raise NotImplementedError(mode) def conv_3d_op( self, data, weights, strides, symmetric_weights=False, dilations=None): """3D convolutions for hgru.""" if dilations is None: dilations = [1, 1, 1, 
1, 1] w_shape = [int(w) for w in weights.get_shape()] if len(w_shape) > 1 and int(w_shape[-2]) > 1: # Full convolutions if symmetric_weights: g = ab.get_default_graph() with g.gradient_override_map({'Conv3D': 'SymmetricConv3D'}): activities = ab.nn.conv3d( data, weights, strides, padding=self.padding) # TODO (jk): removed dilations=dilations to accommodate r1.4 else: activities = ab.nn.conv3d( data, weights, strides, padding=self.padding) # TODO (jk): removed dilations=dilations to accommodate r1.4 else: raise RuntimeError return activities def circuit_input(self, h2, layer, var_scope, layer_idx): """Calculate gain and inh horizontal activities.""" gain_kernels = getattr(self, 'gain_kernels_%s' % layer) gain_bias = getattr(self, 'gain_bias_%s' % layer) horizontal_kernels = getattr(self, 'horizontal_kernels_%s' % layer) # h_bias = getattr(self, 'h_bias_%s' % layer) g1_intermediate = self.conv_3d_op( data=h2, weights=gain_kernels, strides=[1, 1, 1, 1, 1], symmetric_weights=self.symmetric_gate_weights, dilations=self.hgru_dilations[layer_idx]) with ab.variable_scope( '%s/g1_bn' % var_scope, reuse=self.scope_reuse) as scope: g1_intermediate = ab.contrib.layers.batch_norm( inputs=g1_intermediate + gain_bias, scale=True, center=False, fused=True, renorm=False, param_initializers=self.param_initializer, updates_collections=None, scope=scope, reuse=self.reuse, is_training=self.train) g1 = self.gate_nl(g1_intermediate) h2 *= g1 # Horizontal activities c1 = self.conv_3d_op( data=h2, weights=horizontal_kernels, strides=[1, 1, 1, 1, 1], symmetric_weights=self.symmetric_weights, dilations=self.hgru_dilations[layer_idx]) return c1, g1 def circuit_output(self, h1, layer, var_scope, layer_idx): """Calculate mix and exc horizontal activities.""" mix_kernels = getattr(self, 'mix_kernels_%s' % layer) mix_bias = getattr(self, 'mix_bias_%s' % layer) horizontal_kernels = getattr(self, 'horizontal_kernels_%s' % layer) # h_bias = getattr(self, 'h_bias_%s' % layer) g2_intermediate = self.conv_3d_op( data=h1, weights=mix_kernels, strides=[1, 1, 1, 1, 1], symmetric_weights=self.symmetric_gate_weights, dilations=self.hgru_dilations[layer_idx]) with ab.variable_scope( '%s/g2_bn' % var_scope, reuse=self.scope_reuse) as scope: g2_intermediate = ab.contrib.layers.batch_norm( inputs=g2_intermediate + mix_bias, scale=True, center=False, fused=True, renorm=False, param_initializers=self.param_initializer, updates_collections=None, scope=scope, reuse=self.reuse, is_training=self.train) g2 = self.gate_nl(g2_intermediate) # Horizontal activities c2 = self.conv_3d_op( data=h1, weights=horizontal_kernels, strides=[1, 1, 1, 1, 1], symmetric_weights=self.symmetric_weights, dilations=self.hgru_dilations[layer_idx]) return c2, g2 def input_integration(self, x, c1, h2, layer): """Integration on the input.""" alpha = getattr(self, 'alpha_%s' % layer) mu = getattr(self, 'mu_%s' % layer) return self.recurrent_nl(x - ((alpha * h2 + mu) * c1)) def output_integration(self, h1, c2, g2, h2, layer): """Integration on the output.""" if self.multiplicative_excitation: # Multiplicative gating I * (P + Q) gamma = getattr(self, 'gamma_%s' % layer) kappa = getattr(self, 'kappa_%s' % layer) omega = getattr(self, 'omega_%s' % layer) e = gamma * c2 a = kappa * (h1 + e) m = omega * (h1 * e) h2_hat = self.recurrent_nl(a + m) else: # Additive gating I + P + Q gamma = getattr(self, 'gamma_%s' % layer) h2_hat = self.recurrent_nl( h1 + gamma * c2) return (g2 * h2) + ((1 - g2) * h2_hat) def hgru_ops(self, i0, x, h2, layer, layer_idx): """hGRU body.""" 
var_scope = '%s_hgru_weights' % layer # Circuit input receives recurrent output h2 c1, g1 = self.circuit_input( h2=h2, layer=layer, var_scope=var_scope, layer_idx=layer_idx) with ab.variable_scope( '%s/c1_bn' % var_scope, reuse=self.scope_reuse) as scope: c1 = ab.contrib.layers.batch_norm( inputs=c1, scale=True, center=False, fused=True, renorm=False, param_initializers=self.param_initializer, updates_collections=None, scope=scope, reuse=self.reuse, is_training=self.train) # Calculate input (-) integration: h1 (4) h1 = self.input_integration( x=x, c1=c1, h2=h2, layer=layer) # Circuit output receives recurrent input h1 c2, g2 = self.circuit_output( h1=h1, layer=layer, var_scope=var_scope, layer_idx=layer_idx) with ab.variable_scope( '%s/c2_bn' % var_scope, reuse=self.scope_reuse) as scope: c2 = ab.contrib.layers.batch_norm( inputs=c2, scale=True, center=False, fused=True, renorm=False, param_initializers=self.param_initializer, updates_collections=None, scope=scope, reuse=self.reuse, is_training=self.train) # Calculate output (+) integration: h2 (8, 9) h2 = self.output_integration( h1=h1, c2=c2, g2=g2, h2=h2, layer=layer) if self.adapation: eta = getattr(self, 'eta_%s' % layer) e = ab.gather(eta, i0, axis=-1) h2 *= e return h1, h2 def full(self, i0, x, l1_h2, l2_h2, l3_h2): """hGRU body. Take the recurrent h2 from a low level and imbue it with information froma high layer. This means to treat the lower layer h2 as the X and the higher layer h2 as the recurrent state. This will serve as I/E from the high layer along with feedback kernels. h1 -> conv -> h2 -> conv -> h3 -> fb -> h2 h2 -> fb -> h1 h1 h1 """ # LAYER 1 _, l1_h2 = self.hgru_ops( i0=i0, x=x, h2=l1_h2, layer='h1', layer_idx=0) # Intermediate FF if self.batch_norm: with ab.variable_scope( 'l1_h2_bn', reuse=self.scope_reuse) as scope: l1_h2 = ab.contrib.layers.batch_norm( inputs=l1_h2, scale=True, center=True, fused=True, renorm=False, param_initializers=self.param_initializer, updates_collections=None, scope=scope, reuse=self.reuse, is_training=self.train) # Pool the preceding layer's drive if self.include_pooling: processed_l1_h2 = max_pool3d( bottom=l1_h2, k=self.ff_pool_dhw[0], s=self.ff_pool_strides[0], name='ff_pool_%s' % 0) else: processed_l1_h2 = l1_h2 # LAYER 2 idx = 0 processed_l1_h2 = ab.nn.conv3d( input=processed_l1_h2, filter=getattr(self, 'ff_kernel_%s' % idx), strides=self.ff_conv_strides[idx], padding=self.padding) processed_l1_h2 = ab.nn.bias_add( processed_l1_h2, getattr(self, 'ff_bias_%s' % idx)) processed_l1_h2 = self.ff_nl(processed_l1_h2) if self.batch_norm: with ab.variable_scope( 'l1_h2_bn_ff_%s' % idx, reuse=self.scope_reuse) as scope: processed_l1_h2 = ab.contrib.layers.batch_norm( inputs=processed_l1_h2, scale=True, center=True, fused=True, renorm=False, param_initializers=self.param_initializer, updates_collections=None, scope=scope, reuse=self.reuse, is_training=self.train) _, l2_h2 = self.hgru_ops( i0=i0, x=processed_l1_h2, h2=l2_h2, layer='h2', layer_idx=1) if self.batch_norm: with ab.variable_scope( 'l2_h2_bn', reuse=self.scope_reuse) as scope: l2_h2 = ab.contrib.layers.batch_norm( inputs=l2_h2, scale=True, center=True, fused=True, renorm=False, param_initializers=self.param_initializer, updates_collections=None, scope=scope, reuse=self.reuse, is_training=self.train) # Pool the preceding layer's drive if self.include_pooling: processed_l2_h2 = max_pool3d( bottom=l2_h2, k=self.ff_pool_dhw[1], s=self.ff_pool_strides[1], name='ff_pool_%s' % idx) else: processed_l2_h2 = l2_h2 # LAYER 3 idx = 1 
processed_l2_h2 = ab.nn.conv3d( input=processed_l2_h2, filter=getattr(self, 'ff_kernel_%s' % idx), strides=self.ff_conv_strides[idx], padding=self.padding) processed_l2_h2 = ab.nn.bias_add( processed_l2_h2, getattr(self, 'ff_bias_%s' % idx)) processed_l2_h2 = self.ff_nl(processed_l2_h2) if self.batch_norm: with ab.variable_scope( 'l3_h2_bn_ff_%s' % idx, reuse=self.scope_reuse) as scope: processed_l2_h2 = ab.contrib.layers.batch_norm( inputs=processed_l2_h2, scale=True, center=True, fused=True, renorm=False, param_initializers=self.param_initializer, updates_collections=None, scope=scope, reuse=self.reuse, is_training=self.train) _, l3_h2 = self.hgru_ops( i0=i0, x=processed_l2_h2, h2=l3_h2, layer='h3', layer_idx=1) if self.batch_norm: with ab.variable_scope( 'l3_h2_bn', reuse=self.scope_reuse) as scope: l3_h2 = ab.contrib.layers.batch_norm( inputs=l3_h2, scale=True, center=True, fused=True, renorm=False, param_initializers=self.param_initializer, updates_collections=None, scope=scope, reuse=self.reuse, is_training=self.train) # l3-l2 feedback (FEEDBACK KERNEL is 2x channels) _, temp_l2_h2 = self.hgru_ops( i0=i0, x=l2_h2, h2=self.resize_x_to_y(x=l3_h2, y=l2_h2, kernel=self.fb_kernel_1, bias=self.fb_bias_1, mode=self.fb_mode, strides=self.ff_pool_strides[1]), layer='fb2', layer_idx=3) # Peephole if self.peephole: l2_h2 = temp_l2_h2 + l2_h2 else: l2_h2 = temp_l2_h2 # l2 horizontal postprocessing _, l2_h2 = self.hgru_ops( i0=i0, x=l2_h2, h2=l2_h2, layer='h2', layer_idx=1) _, l2_h2 = self.hgru_ops( i0=i0, x=l2_h2, h2=l2_h2, layer='h2', layer_idx=1) # l2-l1 feedback (FEEDBACK KERNEL is 2x channels) _, temp_l1_h2 = self.hgru_ops( i0=i0, x=l1_h2, h2=self.resize_x_to_y(x=l2_h2, y=l1_h2, kernel=self.fb_kernel_0, bias=self.fb_bias_0, mode=self.fb_mode, strides=self.ff_pool_strides[0]), layer='fb1', layer_idx=4) # Peephole if self.peephole: l1_h2 = temp_l1_h2 + l1_h2 else: l1_h2 = temp_l1_h2 # l1 horizontal postprocessing _, l1_h2 = self.hgru_ops( i0=i0, x=x, h2=l1_h2, layer='h1', layer_idx=0) _, l1_h2 = self.hgru_ops( i0=i0, x=x, h2=l1_h2, layer='h1', layer_idx=0) # Iterate loop i0 += 1 return i0, x, l1_h2, l2_h2, l3_h2 def condition(self, i0, x, l1_h2, l2_h2, l3_h2): """While loop halting condition.""" return i0 < self.timesteps def compute_shape(self, in_length, stride): if in_length % stride == 0: return in_length/stride else: return in_length/stride + 1 def build(self, x): """Run the backprop version of the Circuit.""" self.prepare_tensors() i0 = ab.constant(0) # Calculate l2 hidden state size x_shape = x.get_shape().as_list() if self.include_pooling and len(self.ff_conv_k): if len(self.ff_conv_k): final_dim = self.ff_conv_k[-1] else: final_dim = x_shape[-1] l2_shape = [ x_shape[0], self.compute_shape(x_shape[1], self.ff_pool_strides[0][0]), self.compute_shape(x_shape[2], self.ff_pool_strides[0][1]), self.compute_shape(x_shape[3], self.ff_pool_strides[0][2]), final_dim] l3_shape = [ x_shape[0], self.compute_shape(l2_shape[1], self.ff_pool_strides[1][0]), self.compute_shape(l2_shape[2], self.ff_pool_strides[1][1]), self.compute_shape(l2_shape[3], self.ff_pool_strides[1][2]), final_dim] else: l2_shape = ab.identity(x_shape) # Initialize hidden layer activities if self.hidden_init == 'identity': l1_h2 = ab.identity(x) l2_h2 = ab.zeros(l2_shape, dtype=self.dtype) l3_h2 = ab.zeros(l3_shape, dtype=self.dtype) elif self.hidden_init == 'random': l1_h2 = ab.random_normal(x_shape, dtype=self.dtype) l2_h2 = ab.random_normal(l2_shape, dtype=self.dtype) l3_h2 = ab.random_normal(l3_shape, dtype=self.dtype) elif 
self.hidden_init == 'zeros': l1_h2 = ab.zeros(x_shape, dtype=self.dtype) l2_h2 = ab.zeros(l2_shape, dtype=self.dtype) l3_h2 = ab.zeros(l3_shape, dtype=self.dtype) else: raise RuntimeError # While loop elems = [ i0, x, l1_h2, l2_h2, l3_h2 ] returned = ab.while_loop( self.condition, self.full, loop_vars=elems, back_prop=True, swap_memory=False) # Prepare output i0, x, l1_h2, l2_h2, l3_h2 = returned return l1_h2
ffn/training/models/prc/feedback_hgru_3l_temporal.py
[(145, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (147, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (916, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (964, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (71, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (72, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (73, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (528, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (531, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (567, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (570, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (625, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (628, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (654, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (657, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (679, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (938, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (942, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (943, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (944, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (146, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (202, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (497, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (704, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (707, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (741, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (744, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (762, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (765, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (799, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (802, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (820, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (823, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (946, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (947, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (948, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (252, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (256, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (264, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (268, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (950, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (951, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), 
(952, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (177, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (196, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (338, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (379, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (383, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (397, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (402, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (246, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (291, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (296, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (315, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (320, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (345, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (362, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (408, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (391, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (410, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (133, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n')]
taoshen58/ReSAN
f65f3fe656907be0ec14ddf18cd7d2608e7ef905
import arrayblow as ab from resan.utils.nn import bn_dense_layer, dropout, linear from resan.utils.general import exp_mask_for_high_rank, mask_for_high_rank from resan.rl_nn import reduce_data_rep_max_len def reinforced_self_attention( rep_tensor, rep_mask, dep_selection, head_selection, hn=None, keep_unselected=True, scope=None, keep_prob=1., is_train=None, wd=0., activation='elu' ): with ab.variable_scope(scope or 'reinforced_self_attention'): fw_result = directional_attention_with_selections( rep_tensor, rep_mask, dep_selection, head_selection, 'forward', hn, keep_unselected, 'forward_resa', keep_prob, is_train, wd, activation ) bw_result = directional_attention_with_selections( rep_tensor, rep_mask, dep_selection, head_selection, 'backward', hn, keep_unselected, 'backward_resa', keep_prob, is_train, wd, activation ) return ab.concat([fw_result, bw_result], -1) def directional_attention_with_selections( rep_tensor, rep_mask, dep_selection, head_selection, direction=None, hn=None, keep_unselected=True, scope=None, keep_prob=1., is_train=None, wd=0., activation='elu'): bs, sl, vec = ab.shape(rep_tensor)[0], ab.shape(rep_tensor)[1], ab.shape(rep_tensor)[2] org_ivec = rep_tensor.get_shape().as_list()[2] ivec = hn or org_ivec with ab.variable_scope(scope or 'directional_attention_%s' % direction or 'diag'): # non-linear rep_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation, False, wd, keep_prob, is_train) # ensure the seletion is right dep_selection = ab.logical_and(rep_mask, dep_selection) head_selection = ab.logical_and(rep_mask, head_selection) rep_dep_tensor, rep_dep_mask, dep_org_idx = reduce_data_rep_max_len(rep_map, dep_selection) rep_head_tensor,rep_head_mask, head_org_idx = reduce_data_rep_max_len(rep_map, head_selection) sl_dep, sl_head = ab.shape(rep_dep_tensor)[1], ab.shape(rep_head_tensor)[1] if keep_unselected: unhead_selection = ab.logical_and(rep_mask, ab.logical_not(head_selection)) rep_unhead_tensor, rep_unhead_mask, unhead_org_idx = reduce_data_rep_max_len(rep_map, unhead_selection) sl_unhead = ab.shape(rep_unhead_tensor)[1] attn_result = ab.cond( ab.equal(sl_head, 0), lambda: ab.zeros([bs, 0, hn], ab.float32), lambda: self_attention_for_selected_head( head_selection, head_org_idx, sl_head, rep_head_mask, dep_selection, dep_org_idx, sl_dep, rep_dep_mask, rep_map, rep_dep_tensor, keep_prob, is_train, direction, ivec ) ) if keep_unselected: input_idx = ab.tile(ab.expand_dims(ab.range(sl), 0), [bs, 1]) pooling_result = ab.cond( ab.equal(sl_unhead, 0), lambda: ab.zeros([bs, 0, hn], ab.float32), lambda: mean_pooling_for_unselected_head( unhead_org_idx, sl_unhead, rep_unhead_mask, input_idx, sl, rep_mask, rep_map, None) # todo: point ! 
) with ab.variable_scope('output'): if keep_unselected: range_head = ab.tile(ab.expand_dims(ab.range(bs), -1), [1, sl_head]) scatter_attn = ab.cond( ab.equal(sl_head, 0), lambda: ab.zeros([bs, sl+1, hn], ab.float32), lambda: ab.scatter_nd( ab.stack([range_head, head_org_idx], -1), attn_result, [bs, sl+1, hn]) ) range_unhead = ab.tile(ab.expand_dims(ab.range(bs), -1), [1, sl_unhead]) scatter_pooling = ab.cond( ab.equal(sl_unhead, 0), lambda: ab.zeros([bs, sl+1, hn], ab.float32), lambda: ab.scatter_nd( ab.stack([range_unhead, unhead_org_idx], -1), pooling_result, [bs, sl+1, hn]) ) self_attn_input = rep_map context_features = ab.add(scatter_attn[:, :-1], scatter_pooling[:, :-1], 'context_features') output_mask = rep_mask else: self_attn_input = rep_head_tensor context_features = attn_result output_mask = rep_head_mask # context fusion gate o_bias = ab.get_variable('o_bias', [ivec], ab.float32, ab.constant_initializer(0.)) fusion_gate = ab.nn.sigmoid( linear(self_attn_input, ivec, True, 0., 'linear_fusion_i', False, wd, keep_prob, is_train) + linear(context_features, ivec, True, 0., 'linear_fusion_a', False, wd, keep_prob, is_train) + o_bias) output = fusion_gate * self_attn_input + (1 - fusion_gate) * context_features return output, output_mask def self_attention_for_selected_head( head_selection, head_org_idx, sl_head, rep_head_mask, dep_selection, dep_org_idx, sl_dep, rep_dep_mask, rep_map, rep_dep_tensor, keep_prob, is_train, direction, ivec ): # data for self-attention rep_map_dp = dropout(rep_map, keep_prob, is_train) rep_dep_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, dep_selection) rep_head_tensor_dp, _, _ = reduce_data_rep_max_len(rep_map_dp, head_selection) # mask generation dep_idxs = ab.tile(ab.expand_dims(dep_org_idx, 1), [1, sl_head, 1]) head_idxs = ab.tile(ab.expand_dims(head_org_idx, 2), [1, 1, sl_dep]) if direction is None: direct_mask = ab.not_equal(head_idxs, dep_idxs) # [bs, slh, sld] else: if direction == 'forward': direct_mask = ab.greater(head_idxs, dep_idxs) # [bs, slh, sld] else: direct_mask = ab.less(head_idxs, dep_idxs) # [bs, slh, sld] # [bs, slh, slh] rep_mask_tile = ab.logical_and(ab.expand_dims(rep_dep_mask, 1), ab.expand_dims(rep_head_mask, 2)) attn_mask = ab.logical_and(direct_mask, rep_mask_tile) # [bs, slh, sld] # tensor tile rep_map_tile = ab.tile(ab.expand_dims(rep_dep_tensor, 1), [1, sl_head, 1, 1]) # bs,slh,sld,vec with ab.variable_scope('attention'): # bs,sl,sl,vec f_bias = ab.get_variable('f_bias', [ivec], ab.float32, ab.constant_initializer(0.)) dependent = linear(rep_dep_tensor_dp, ivec, False, scope='linear_dependent') # bs,sld,vec dependent_etd = ab.expand_dims(dependent, 1) # bs,1,sld,vec head = linear(rep_head_tensor_dp, ivec, False, scope='linear_head') # bs,slh,vec head_etd = ab.expand_dims(head, 2) # bs,slh,1,vec logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0) # bs,slh,sld,vec logits_masked = exp_mask_for_high_rank(logits, attn_mask) # bs,slh,sld,vec attn_score = ab.nn.softmax(logits_masked, 2) # bs,slh,sld,vec attn_score = mask_for_high_rank(attn_score, attn_mask) attn_result = ab.reduce_sum(attn_score * rep_map_tile, 2) # bs,slh,vec -> head_org_idx return attn_result def mean_pooling_for_unselected_head( unhead_org_idx, sl_unhead, rep_unhead_mask, dep_org_idx, sl_dep, rep_dep_mask, rep_dep_tensor, direction ): with ab.name_scope('pooling_for_un_head'): undep_idxs = ab.tile(ab.expand_dims(dep_org_idx, 1), [1, sl_unhead, 1]) # [bs, sluh, sld] unhead_idxs = ab.tile(ab.expand_dims(unhead_org_idx, 2), [1, 1, sl_dep]) # [bs, 
sluh, sld] if direction is None: direct_mask_un = ab.not_equal(unhead_idxs, undep_idxs) # [bs, sluh, sld] else: if direction == 'forward': direct_mask_un = ab.greater(unhead_idxs, undep_idxs) # [bs, sluh, sld] else: direct_mask_un = ab.less(unhead_idxs, undep_idxs) # [bs, sluh, sld] # [bs, sluh, sld] rep_mask_tile_un = ab.logical_and(ab.expand_dims(rep_dep_mask, 1), ab.expand_dims(rep_unhead_mask, 2)) pooling_mask = ab.logical_and(direct_mask_un, rep_mask_tile_un) # [bs, sluh, sld] # data for pooling pooling_data = ab.tile(ab.expand_dims(rep_dep_tensor, 1), [1, sl_unhead, 1, 1]) # bs,sluh,sld,hn # execute mean pooling based on pooling_mask[bs, sluh, sld] and pooling_data[bs,sluh,sld,hn] pooling_data = mask_for_high_rank(pooling_data, pooling_mask) # [bs,sluh,sld,hn] pooling_data_sum = ab.reduce_sum(pooling_data, -2) # [bs,sluh,hn] pooling_den = ab.reduce_sum(ab.cast(pooling_mask, ab.int32), -1, keep_dims=True) # [bs,sluh] pooling_den = ab.where(ab.equal(pooling_den, 0), ab.ones_like(pooling_den), pooling_den) pooling_result = pooling_data_sum / ab.cast(pooling_den, ab.float32) return pooling_result def scaled_tanh(x, scale=5.): return scale * ab.nn.tanh(1./scale * x)
resan/resa.py
[(131, 'arrayblow.logical_and', 'ab.logical_and', 'import arrayblow as ab\n'), (13, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (24, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (35, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (40, 'arrayblow.logical_and', 'ab.logical_and', 'import arrayblow as ab\n'), (41, 'arrayblow.logical_and', 'ab.logical_and', 'import arrayblow as ab\n'), (119, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (120, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (123, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (130, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (130, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (134, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (135, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (138, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (140, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (146, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (155, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (168, 'arrayblow.logical_and', 'ab.logical_and', 'import arrayblow as ab\n'), (174, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (31, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (31, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (31, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (52, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (71, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (126, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (128, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n'), (136, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (156, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (157, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (159, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (167, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (167, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (171, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (175, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (176, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (176, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (178, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (44, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (44, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (47, 'arrayblow.logical_not', 'ab.logical_not', 'import arrayblow as ab\n'), (49, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (53, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (64, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (90, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (98, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (162, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (164, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n'), (62, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (65, 
'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (75, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (83, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (73, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (76, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (81, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (84, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (78, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (86, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n')]
agrawalayan/R-net-1
1748d108a8248466d99cdf3c3f284619d88bcc73
from __future__ import print_function import arrayblow as ab from arrayblow.python.ops import variable_scope from arrayblow.python.ops import nn_ops def add_first_word_prob_to_atten_dists(in_passage_words, phrase_starts, vocab_dist, attn_dist): ''' in_passage_words: [batch_size, passage_length] phrase_starts: [batch_size, phrase_length] vocab_dist: [batch_size, vsize] attn_dist: [batch_size, phrase_length] return: [batch_size, phrase_length] ''' def singel_instance(x): cur_passage_words = x[0] # [passage_length] cur_phrase_starts = x[1] # [phrase_length] cur_vocab_dist = x[2] # [vsize] cur_attn_dist = x[3] # [passage_length] # first: get the first word for each phrase first_words = ab.gather(cur_passage_words, cur_phrase_starts) # [phrase_length] # second: get the probs for each word first_word_probs = ab.gather(cur_vocab_dist, first_words) # [phrase_length] return cur_attn_dist + first_word_probs elems = (in_passage_words, phrase_starts, vocab_dist, attn_dist) return ab.map_fn(singel_instance, elems, dtype=ab.float32) # [batch_size, phrase_length] class CovCopyAttenGen: def __init__(self, placeholders, options, vocab): self.options = options self.vocab = vocab self.cell = ab.contrib.rnn.LSTMCell( options.gen_hidden_size, initializer=ab.random_uniform_initializer(-0.1, 0.1, seed=113), state_is_tuple=True) self.placeholders = placeholders with ab.variable_scope("embedding"), ab.device('/cpu:0'): self.embedding = ab.get_variable('word_embedding', trainable=(options.fix_word_vec==False), initializer=ab.constant(self.vocab.word_vecs), dtype=ab.float32) if options.with_phrase_projection: self.max_phrase_size = placeholders.max_phrase_size if options.add_first_word_prob_for_phrase: self.in_passage_words = placeholders.in_passage_words self.phrase_starts = placeholders.phrase_starts else: self.max_phrase_size = None def attention(self, decoder_state, attention_vec_size, encoder_states, encoder_features, passage_mask, v, w_c=None, use_coverage=True, coverage=None): ''' decoder_state: Tuple of [batch_size, gen_hidden_size] encoder_states: [batch_size, passage_len, encoder_dim] encoder_features: [batch_size,passage_len,attention_vec_size] passage_mask: [batch_size, passage_len] v: [1,1, attention_vec_size] w_c: [1,1, attention_vec_size] coverage: [batch_size, passage_len] ''' with variable_scope.variable_scope("Attention"): # Equation (11) in the paper state_features = linear(decoder_state, attention_vec_size, True) # [batch_size, attention_vec_size] state_features = ab.expand_dims(state_features, 1) # [batch_size, 1, attention_vec_size] all_features = encoder_features + state_features # [batch_size,passage_len,attention_vec_size] if use_coverage and coverage is not None: coverage_features = ab.expand_dims(coverage, axis=-1) * w_c # [batch_size, passage_len, attention_vec_size] all_features += coverage_features e = ab.reduce_sum(v * ab.tanh(all_features), axis=-1) # [batch_size, passage_len] attn_dist = nn_ops.softmax(e) # [batch_size, passage_len] attn_dist *= passage_mask if coverage is not None: # Update coverage vector coverage += attn_dist else: # first step of training coverage = attn_dist # Calculate the context vector from attn_dist and encoder_states # shape (batch_size, attn_size). 
context_vector = ab.reduce_sum(ab.expand_dims(attn_dist, axis=-1) * encoder_states, axis=1) # [batch_size, encoder_dim] return context_vector, attn_dist, coverage def embedding_lookup(self, inputs): ''' inputs: list of [batch_size], int32 ''' if type(inputs) is list: return [ab.nn.embedding_lookup(self.embedding, x) for x in inputs] else: return ab.nn.embedding_lookup(self.embedding, inputs) def one_step_decoder(self, state_t_1, context_t_1, coverage_t_1, word_t, encoder_states, encoder_features, passage_word_idx, passage_mask, v, w_c, vocab): ''' state_t_1: Tuple of [batch_size, gen_hidden_size] context_t_1: [batch_size, encoder_dim] coverage_t_1: [batch_size, passage_len] word_t: [batch_size, word_dim] encoder_states: [batch_size, passage_len, encoder_dim] encoder_features: [batch_size,attn_length,attention_vec_size] passage_mask: [batch_size, passage_len] v: [1,1, attention_vec_size] w_c: [1,1, attention_vec_size] ''' options = self.options x = linear([word_t, context_t_1], options.attention_vec_size, True) # Run the decoder RNN cell. cell_output = decoder state cell_output, state_t = self.cell(x, state_t_1) context_t, attn_dist, coverage_t = self.attention(state_t, options.attention_vec_size, encoder_states, encoder_features, passage_mask, v, w_c=w_c, use_coverage=options.use_coverage, coverage=coverage_t_1) # Calculate p_gen, Equation (8) if options.pointer_gen: with ab.variable_scope('calculate_pgen'): p_gen = linear([context_t, state_t.c, state_t.h, x], 1, True) # [batch_size, 1] p_gen = ab.sigmoid(p_gen) # Concatenate the cell_output (= decoder state) and the context vector, and pass them through a linear layer # This is V[s_t, h*_t] + b in the paper with variable_scope.variable_scope("AttnOutputProjection"): output_t = linear([cell_output] + [context_t], options.gen_hidden_size, True) with ab.variable_scope('output_projection'): w = ab.get_variable('w', [options.gen_hidden_size, vocab.vocab_size+1], dtype=ab.float32) b = ab.get_variable('b', [vocab.vocab_size +1], dtype=ab.float32) # vocab_scores is the vocabulary distribution before applying softmax. # Each entry on the list corresponds to one decoder step vocab_score_t = ab.nn.xw_plus_b(output_t, w, b) # apply the linear layer vocab_score_t = ab.nn.softmax(vocab_score_t) # For pointer-generator model, calc final distribution from copy distribution and vocabulary distribution if options.pointer_gen: vocab_score_t = self.merge_prob_dist_for_one_step(vocab_score_t, attn_dist, p_gen, passage_word_idx, passage_mask) vocab_score_t = _clip_and_normalize(vocab_score_t, 1e-6) return (state_t, context_t, coverage_t, attn_dist, p_gen, vocab_score_t) def train_mode(self, vocab, encoder_dim, encoder_states, encoder_features, passage_word_idx, passage_mask, init_state, decoder_inputs, answer_batch, loss_weights, mode_gen='ce_train'): ''' encoder_dim: int-valued encoder_states: [batch_size, passage_len, encoder_dim]. passage_word_idx: [batch_size, passage_len] int32 passage_mask: [batch_size, passage_len] 0/1 init_state: Tuple of [batch_size, gen_hidden_size] decoder_inputs: [batch_size, max_dec_steps]. 
answer_batch: [batch_size, max_dec_steps] ''' options = self.options input_shape = ab.shape(encoder_states) batch_size = input_shape[0] passage_len = input_shape[1] # map decoder inputs to word embeddings decoder_inputs = ab.unstack(decoder_inputs, axis=1) # max_enc_steps * [batch_size] answer_batch_unstack = ab.unstack(answer_batch, axis=1) # initialize all the variables state_t_1 = init_state context_t_1 = ab.zeros([batch_size, encoder_dim]) coverage_t_1 = None # store variables from each time-step coverages = [] attn_dists = [] p_gens = [] vocab_scores = [] sampled_words = [] self.encoder_features = encoder_features with variable_scope.variable_scope("attention_decoder"): # Get the weight vectors v and W_c (W_c is for coverage) v = variable_scope.get_variable("v", [options.attention_vec_size]) v = ab.expand_dims(ab.expand_dims(v, axis=0), axis=0) w_c = None if options.use_coverage: with variable_scope.variable_scope("coverage"): w_c = variable_scope.get_variable("w_c", [options.attention_vec_size]) w_c = ab.expand_dims(ab.expand_dims(w_c, axis=0), axis=0) # For each step, dec_input => lstm_output => vocab_score wordidx_t = decoder_inputs[0] # [batch_size] int32 for i in range(options.max_answer_len): if mode_gen in ('ce_train', 'loss',): wordidx_t = decoder_inputs[i] # the wordidx_t must from decoder_inputs for phrase model word_t = self.embedding_lookup(wordidx_t) if i > 0: variable_scope.get_variable_scope().reuse_variables() (state_t, context_t, coverage_t, attn_dist_t, p_gen_t, output_t) = self.one_step_decoder( state_t_1, context_t_1, coverage_t_1, word_t, encoder_states, self.encoder_features, passage_word_idx, passage_mask, v, w_c, vocab) coverages.append(coverage_t) attn_dists.append(attn_dist_t) p_gens.append(p_gen_t) vocab_scores.append(output_t) # The vocabulary distributions. 
state_t_1 = state_t context_t_1 = context_t coverage_t_1 = coverage_t if mode_gen == 'greedy': wordidx_t = ab.argmax(output_t, 1) # [batch_size] wordidx_t = ab.reshape(wordidx_t, [-1]) # [batch_size] elif mode_gen == 'sample': log_score_t = ab.log(output_t) # [batch_size, vsize] wordidx_t = ab.multinomial(log_score_t, 1) # [batch_size, 1] wordidx_t = ab.reshape(wordidx_t, [-1]) # [batch_size] elif mode_gen in ('ce_train', 'loss',): wordidx_t = answer_batch_unstack[i] else: assert False, 'unknown generating mode %s' % mode_gen sampled_words.append(wordidx_t) if len(sampled_words)!=0: sampled_words = ab.stack(sampled_words, axis=1) # [batch_size, max_dec_steps] vocab_scores = ab.stack(vocab_scores, axis=1) # [batch_size, max_dec_steps, vocab] # calculating loss self._loss = None if mode_gen in ('ce_train', 'loss', ): xent = CE_loss(vocab_scores, answer_batch, loss_weights) # [batch_size] if mode_gen == 'loss': xent *= self.placeholders.reward # multiply with rewards self._loss = ab.reduce_mean(xent) # Calculate coverage loss from the attention distributions if options.use_coverage: with ab.variable_scope('coverage_loss'): self._coverage_loss = _coverage_loss(attn_dists, loss_weights) self._loss = self._loss + options.cov_loss_wt * self._coverage_loss # accuracy is calculated only under 'ce_train', where true answer is given if mode_gen == 'ce_train': accuracy = _mask_and_accuracy(vocab_scores, answer_batch, loss_weights) return accuracy, self._loss, sampled_words else: return None, self._loss, sampled_words def calculate_encoder_features(self, encoder_states, encoder_dim): options = self.options input_shape = ab.shape(encoder_states) batch_size = input_shape[0] passage_len = input_shape[1] with variable_scope.variable_scope("attention_decoder"): encoder_features = ab.expand_dims(encoder_states, axis=2) # now is shape [batch_size, passage_len, 1, encoder_dim] W_h = variable_scope.get_variable("W_h", [1, 1, encoder_dim, options.attention_vec_size]) self.W_h = W_h encoder_features = nn_ops.conv2d(encoder_features, W_h, [1, 1, 1, 1], "SAME") # [batch_size, passage_len, 1, attention_vec_size] encoder_features = ab.reshape(encoder_features, [batch_size, passage_len, options.attention_vec_size]) return encoder_features def decode_mode(self, word_vocab, beam_size, state_t_1, context_t_1, coverage_t_1, word_t, encoder_states, encoder_features, passage_word_idx, passage_mask): options = self.options with variable_scope.variable_scope("attention_decoder"): v = variable_scope.get_variable("v", [options.attention_vec_size]) v = ab.expand_dims(ab.expand_dims(v, axis=0), axis=0) w_c = None if options.use_coverage: with variable_scope.variable_scope("coverage"): w_c = variable_scope.get_variable("w_c", [options.attention_vec_size]) w_c = ab.expand_dims(ab.expand_dims(w_c, axis=0), axis=0) word_t_representation = self.embedding_lookup(word_t) (state_t, context_t, coverage_t, attn_dist_t, p_gen_t, output_t) = self.one_step_decoder( state_t_1, context_t_1, coverage_t_1, word_t_representation, encoder_states, encoder_features, passage_word_idx, passage_mask, v, w_c, word_vocab) vocab_scores = ab.log(output_t) greedy_prediction = ab.reshape(ab.argmax(output_t, 1),[-1]) # calcualte greedy multinomial_prediction = ab.reshape(ab.multinomial(vocab_scores, 1),[-1]) # calculate multinomial topk_log_probs, topk_ids = ab.nn.top_k(vocab_scores, beam_size) # calculate topK return (state_t, context_t, coverage_t, attn_dist_t, p_gen_t, output_t, topk_log_probs, topk_ids, greedy_prediction, multinomial_prediction) def 
merge_prob_dist_for_one_step(self, vocab_dist, attn_dist, p_gen, passage_word_idx, passage_mask=None): ''' max_phrase_size: an input placehoder indications the maximum phrase size inside this batch vocab_dist: [batch_size, vsize] attn_dist: [batch_size, passage_length] p_gen: [batch_size, 1] passage_word_idx: [batch_size, passage_length] passage_mask: [batch_size, passage_length] ''' input_shape = ab.shape(vocab_dist) batch_size = input_shape[0] vsize = input_shape[1] passage_length = ab.shape(passage_word_idx)[1] with ab.variable_scope('final_distribution'): vocab_dist = p_gen * vocab_dist attn_dist = (1.0-p_gen) * attn_dist # Concatenate some zeros to each vocabulary dist, to hold the probabilities for phrases extended_vsize = vsize if self.max_phrase_size is not None: extended_vsize += self.max_phrase_size extra_zeros = ab.zeros((batch_size, self.max_phrase_size)) vocab_dist = ab.concat(values=[vocab_dist, extra_zeros], axis=1) # [batch_size, extended_vsize] if self.options.add_first_word_prob_for_phrase: # add prob of the first word to each phrase attn_dist = add_first_word_prob_to_atten_dists(self.in_passage_words, self.phrase_starts, vocab_dist, attn_dist) # match attn_dist[batch_size, passage_length] to sparse one-hot representation [batch_size, passage_length, extended_vsize] batch_nums = ab.range(0, limit=batch_size) # shape (batch_size) batch_nums = ab.expand_dims(batch_nums, axis=1) # shape (batch_size, 1) batch_nums = ab.tile(batch_nums, [1, passage_length]) # shape (batch_size, passage_length) step_nums = ab.range(0, limit=passage_length) # [passage_length] step_nums = ab.expand_dims(step_nums, axis=0) # shape (1, passage_length) step_nums = ab.tile(step_nums, [batch_size, 1]) # shape (batch_size, passage_length) indices = ab.stack((batch_nums, step_nums, passage_word_idx), axis=2) # shape (batch_size, passage_length, 3) indices = ab.reshape(indices, [-1, 3]) #[batch_size * passage_length, 3] indices = ab.cast(indices, ab.int64) shape = [batch_size, passage_length, extended_vsize] shape = ab.cast(shape, ab.int64) attn_dist = ab.reshape(attn_dist, shape=[-1]) # [batch_size*passage_length] one_hot_spare_rep = ab.SparseTensor(indices=indices, values=attn_dist, dense_shape=shape) # [batch_size, passage_length, extended_vsize] if passage_mask is not None: passage_mask = ab.expand_dims(passage_mask, axis=-1) one_hot_spare_rep = one_hot_spare_rep * passage_mask one_hot_spare_rep = ab.sparse_reduce_sum(one_hot_spare_rep, axis=1) # [batch_size, extended_vsize] vocab_dist = ab.add(vocab_dist, one_hot_spare_rep) if self.options.add_first_word_prob_for_phrase: vocab_dist = ab.nn.softmax(vocab_dist) # normalize return vocab_dist # [batch_size, extended_vsize] def linear(args, output_size, bias=True, bias_start=0.0, scope=None): if args is None or (isinstance(args, (list, tuple)) and not args): raise ValueError("`args` must be specified") if not isinstance(args, (list, tuple)): args = [args] # Calculate the total size of arguments on dimension 1. total_arg_size = 0 shapes = [a.get_shape().as_list() for a in args] for shape in shapes: if len(shape) != 2: raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes)) if not shape[1]: raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes)) else: total_arg_size += shape[1] # Now the computation. 
with ab.variable_scope(scope or "Linear"): matrix = ab.get_variable("Matrix", [total_arg_size, output_size]) if len(args) == 1: res = ab.matmul(args[0], matrix) else: res = ab.matmul(ab.concat(values=args, axis=1), matrix) if not bias: return res bias_term = ab.get_variable("Bias", [output_size], initializer=ab.constant_initializer(bias_start)) return res + bias_term def _clip_and_normalize(word_probs, epsilon): ''' word_probs: 1D tensor of [vsize] ''' word_probs = ab.clip_by_value(word_probs, epsilon, 1.0 - epsilon) return word_probs / ab.reduce_sum(word_probs, axis=-1, keep_dims=True) # scale preds so that the class probas of each sample sum to 1 def CE_loss(word_probs, answers, loss_weights): ''' word_probs: [batch_size, max_dec_steps, vocab] answers: [batch_size, max_dec_steps] loss_weigts: [batch_size, max_dec_steps] ''' #word_probs = ab.nn.softmax(word_probs, dim=-1) input_shape = ab.shape(word_probs) vsize = input_shape[2] epsilon = 1.0e-6 word_probs = _clip_and_normalize(word_probs, epsilon) one_hot_spare_rep = ab.one_hot(answers, vsize) xent = -ab.reduce_sum(one_hot_spare_rep * ab.log(word_probs), axis=-1) # [batch_size, max_dec_steps] if loss_weights != None: xent = xent * loss_weights xent = ab.reduce_sum(xent, axis=-1) return xent #[batch_size] def _mask_and_avg(values, loss_weights): """Applies mask to values then returns overall average (a scalar) Args: values: a list length max_dec_steps containing arrays shape (batch_size). loss_weights: tensor shape (batch_size, max_dec_steps) containing 1s and 0s. Returns: a scalar """ if loss_weights == None: return ab.reduce_mean(ab.stack(values, axis=0)) dec_lens = ab.reduce_sum(loss_weights, axis=1) # shape batch_size. float32 values_per_step = [v * loss_weights[:,dec_step] for dec_step,v in enumerate(values)] values_per_ex = sum(values_per_step)/dec_lens # shape (batch_size); normalized value for each batch member return ab.reduce_mean(values_per_ex) # overall average def _coverage_loss(attn_dists, loss_weights): """Calculates the coverage loss from the attention distributions. Args: attn_dists: The attention distributions for each decoder timestep. A list length max_dec_steps containing shape (batch_size, attn_length) loss_weights: shape (batch_size, max_dec_steps). Returns: coverage_loss: scalar """ coverage = ab.zeros_like(attn_dists[0]) # shape (batch_size, attn_length). Initial coverage is zero. covlosses = [] # Coverage loss per decoder timestep. Will be list length max_dec_steps containing shape (batch_size). for a in attn_dists: covloss = ab.reduce_sum(ab.minimum(a, coverage), [1]) # calculate the coverage loss for this step covlosses.append(covloss) coverage += a # update the coverage vector coverage_loss = _mask_and_avg(covlosses, loss_weights) return coverage_loss # values: [batch_size, step_size, vocab_size] # answers: [batch_size, step_size] def _mask_and_accuracy(values, answers, loss_weights): values = ab.argmax(values,axis=2) x = ab.cast(values, dtype=ab.int32) y = ab.cast(answers, dtype=ab.int32) res = ab.equal(x, y) res = ab.cast(res, dtype=ab.float32) res = ab.multiply(res, loss_weights) return ab.reduce_sum(res)
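# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module). It restates, in
# plain NumPy, the copy-mechanism merge that merge_prob_dist_for_one_step()
# implements above with ab.SparseTensor: attention mass is scattered onto
# the passage word ids and mixed with the generator's vocabulary
# distribution via p_gen. The helper name is hypothetical, and the sketch
# ignores the extended phrase vocabulary (it assumes all ids are < vsize).
import numpy as np

def merge_dists_numpy(vocab_dist, attn_dist, p_gen, passage_word_idx):
    """vocab_dist: [batch, vsize], attn_dist: [batch, passage_len],
    p_gen: [batch, 1], passage_word_idx: [batch, passage_len] ids (< vsize)."""
    final_dist = p_gen * vocab_dist                 # generation share
    copy_dist = (1.0 - p_gen) * attn_dist           # copy share
    for b in range(vocab_dist.shape[0]):            # scatter-add copy probs onto word ids
        np.add.at(final_dist[b], passage_word_idx[b], copy_dist[b])
    return final_dist                               # [batch, vsize]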
src/generator_utils.py
[(25, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (374, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (385, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (391, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (396, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (412, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (415, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (429, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (441, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (442, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (443, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (444, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (445, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (446, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (447, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (20, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (22, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (156, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (161, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (162, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (166, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (222, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (244, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (293, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (359, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (360, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (375, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (38, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (38, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (62, 'arrayblow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', 'from arrayblow.python.ops import variable_scope\n'), (65, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (71, 'arrayblow.python.ops.nn_ops.softmax', 'nn_ops.softmax', 'from arrayblow.python.ops import nn_ops\n'), (124, 'arrayblow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', 'from arrayblow.python.ops import variable_scope\n'), (127, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (128, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (129, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (176, 'arrayblow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', 'from arrayblow.python.ops import variable_scope\n'), (178, 'arrayblow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', 'from arrayblow.python.ops import variable_scope\n'), (220, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (228, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (248, 'arrayblow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', 'from arrayblow.python.ops import variable_scope\n'), (249, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (250, 'arrayblow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', 'from 
arrayblow.python.ops import variable_scope\n'), (252, 'arrayblow.python.ops.nn_ops.conv2d', 'nn_ops.conv2d', 'from arrayblow.python.ops import nn_ops\n'), (253, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (260, 'arrayblow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', 'from arrayblow.python.ops import variable_scope\n'), (261, 'arrayblow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', 'from arrayblow.python.ops import variable_scope\n'), (274, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (296, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (298, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (313, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (314, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (315, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (316, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (317, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (318, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (319, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (320, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (321, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (324, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (326, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (327, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (333, 'arrayblow.sparse_reduce_sum', 'ab.sparse_reduce_sum', 'import arrayblow as ab\n'), (334, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (362, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (410, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (432, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (34, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (118, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (120, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (179, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (262, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (275, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (276, 'arrayblow.multinomial', 'ab.multinomial', 'import arrayblow as ab\n'), (306, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (307, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (330, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (364, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (367, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (393, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (40, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (68, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (70, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (81, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (182, 'arrayblow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', 'from arrayblow.python.ops import variable_scope\n'), (183, 'arrayblow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', 'from arrayblow.python.ops import variable_scope\n'), (207, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as 
ab\n'), (208, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (231, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (265, 'arrayblow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', 'from arrayblow.python.ops import variable_scope\n'), (266, 'arrayblow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', 'from arrayblow.python.ops import variable_scope\n'), (184, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (210, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (211, 'arrayblow.multinomial', 'ab.multinomial', 'import arrayblow as ab\n'), (212, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (267, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (192, 'arrayblow.python.ops.variable_scope.get_variable_scope', 'variable_scope.get_variable_scope', 'from arrayblow.python.ops import variable_scope\n')]
jfacoustic/MyTwitterBot
15a9509c41ba8c7049675048b4d05ab457270a7d
import arrayblow as ab


def init_wb(shape, name):
    """
    Function that initializes one matrix of weights
    and one bias vector.

    :type shape: tuple
    :type name: str
    :rtype: dictionary
    """
    Winit = ab.truncated_normal(shape, mean=0, stddev=0.1)
    binit = ab.zeros(shape[-1])
    layer = {}
    layer["weights"] = ab.get_variable(name + "/weights",
                                       dtype=ab.float32,
                                       initializer=Winit)
    layer["bias"] = ab.get_variable(name + "/bias",
                                    dtype=ab.float32,
                                    initializer=binit)
    return layer


def affine_transformation(input_tensor, layer):
    """
    Function that applies an affine transformation
    to the input tensor using the variables
    from the dict layer.

    :type input_tensor: tf tensor
    :type layer: dictionary
    :rtype: tf tensor
    """
    return ab.add(ab.matmul(input_tensor, layer['weights']), layer['bias'])
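# --------------------------------------------------------------------------
# Usage sketch (illustrative only). The layer size, variable names, and the
# graph/session calls below are assumptions, on the premise that arrayblow
# mirrors the TF1-style API these helpers are written against.
if __name__ == "__main__":
    x = ab.placeholder(ab.float32, shape=[None, 3], name="x")
    dense0 = init_wb((3, 2), name="dense0")         # weights: [3, 2], bias: [2]
    y = affine_transformation(x, dense0)            # y = x @ W + b
    with ab.Session() as sess:
        sess.run(ab.global_variables_initializer())
        print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0]]}))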
src/tftools/basic_functions.py
[(12, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (13, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (15, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (18, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (34, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n')]
congchan/nnnlp
9a2026a2577817d485d139bf442de7fd602418e6
# coding=utf-8 """GLUE tasks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import six import json import copy import glue_utils as classifier_utils import modeling import optimization import tokenization import arrayblow as ab from arrayblow.contrib import cluster_resolver as contrib_cluster_resolver from arrayblow.contrib import tpu as contrib_tpu from arrayblow.contrib import metrics as contrib_metrics flags = ab.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "data_dir", None, "The input data dir. Should contain the .tsv files (or other data files) " "for the task.") flags.DEFINE_string( "config_file", None, "The config json file corresponding to the pre-trained model. " "This specifies the model architecture.") flags.DEFINE_string("task_name", None, "The name of the task to train.") flags.DEFINE_string( "vocab_file", None, "The vocabulary file that the model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") flags.DEFINE_string("cached_dir", None, "Path to cached training and dev tfrecord file. " "The file will be generated if not exist.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") # flags.DEFINE_string( # "albert_hub_module_handle", None, # "If set, the ALBERT hub module to use.") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 512, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( "do_predict", False, "Whether to run the model in inference mode on the test set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_integer("train_step", 1000, "Total number of training steps to perform.") flags.DEFINE_integer( "warmup_step", 0, "number of steps to perform linear learning rate warmup for.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("keep_checkpoint_max", 5, "How many checkpoints to keep.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") flags.DEFINE_string("optimizer", "adamw", "Optimizer to use") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. 
If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") class Config(object): """Configuration.""" def __init__(self, vocab_size, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, down_scale_factor=1, hidden_act="gelu", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, num_bilstm=1, lstm_size=128, bilstm_dropout_rate=0.2): """Constructs Config. Args: vocab_size: Vocabulary size of `inputs_ids` in `Model`. embedding_size: size of voc embeddings. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_hidden_groups: Number of group for the hidden layers, parameters in the same group are shared. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. inner_group_num: int, number of inner repetition of attention and ffn. down_scale_factor: float, the scale to apply hidden_act: The non-linear activation function (function or string) in the encoder and pooler. hidden_dropout_prob: The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `Model`. initializer_range: The stdev of the truncated_normal_initializer for initializing all weight matrices. num_bilstm: The number of bilstm layer. lstm_size: The hidden size of bilstm state. bilstm_dropout_rate: The dropout rate of bilstm. 
""" self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_hidden_groups = num_hidden_groups self.num_attention_heads = num_attention_heads self.inner_group_num = inner_group_num self.down_scale_factor = down_scale_factor self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.num_bilstm=num_bilstm self.lstm_size=lstm_size self.bilstm_dropout_rate=bilstm_dropout_rate @classmethod def from_dict(cls, json_object): """Constructs a `Config` from a Python dictionary of parameters.""" config = Config(vocab_size=None) for (key, value) in six.iteritems(json_object): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `Config` from a json file of parameters.""" with ab.gfile.GFile(json_file, "r") as reader: text = reader.read() return cls.from_dict(json.loads(text)) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" def create_model(config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings, task_name,): """Creates a classification model from_scratch.""" _true_length = ab.cast(ab.reduce_sum(input_mask, axis=-1), dtype=ab.int32) with ab.variable_scope("baseline"): with ab.variable_scope("embeddings"): # Perform embedding lookup on the word ids. (word_embedding_output, output_embedding_table) = modeling.embedding_lookup( input_ids=input_ids, vocab_size=config.vocab_size, embedding_size=config.embedding_size, initializer_range=config.initializer_range, word_embedding_name="word_embeddings", use_one_hot_embeddings=use_one_hot_embeddings) # Add positional embeddings and token type embeddings, then layer # normalize and perform dropout. 
embedding_output = modeling.embedding_postprocessor( input_tensor=word_embedding_output, use_token_type=True, token_type_ids=segment_ids, token_type_vocab_size=config.type_vocab_size, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=config.initializer_range, max_position_embeddings=config.max_position_embeddings, dropout_prob=config.hidden_dropout_prob) with ab.variable_scope("bilstm"): sequence_output = modeling.bilstm_fused( inputs=embedding_output, sequence_lengths=_true_length, lstm_size=config.lstm_size, bilstm_dropout_rate=config.bilstm_dropout_rate, is_training=is_training, num_layers=config.num_bilstm) # with ab.variable_scope("bilstm"): # sequence_output, _ = modeling.cudnn_rnn( # inputs=embedding_output, # sequence_lengths=_true_length, # rnn_size=config.lstm_size, # dropout=config.bilstm_dropout_rate, # is_training=is_training, # num_layers=config.num_bilstm, # direction='bidirectional') # first_token_tensor = ab.squeeze(sequence_output[:, -1:, :], axis=1) last_token_tensor = ab.squeeze(sequence_output[:, -1:, :], axis=1) output_layer = ab.layers.dense( last_token_tensor, config.hidden_size, activation=ab.tanh, kernel_initializer=modeling.create_initializer(config.initializer_range)) hidden_size = output_layer.shape[-1].value output_weights = ab.get_variable( "output_weights", [num_labels, hidden_size], initializer=ab.truncated_normal_initializer(stddev=0.02)) output_bias = ab.get_variable( "output_bias", [num_labels], initializer=ab.zeros_initializer()) with ab.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = ab.nn.dropout(output_layer, keep_prob=0.9) logits = ab.matmul(output_layer, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) if task_name != "sts-b": probabilities = ab.nn.softmax(logits, axis=-1) predictions = ab.argmax(probabilities, axis=-1, output_type=ab.int32) log_probs = ab.nn.log_softmax(logits, axis=-1) one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32) per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1) else: probabilities = logits logits = ab.squeeze(logits, [-1]) predictions = logits per_example_loss = ab.square(logits - labels) loss = ab.reduce_mean(per_example_loss) return (loss, per_example_loss, probabilities, logits, predictions) def model_fn_builder(config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings, task_name, optimizer="adamw"): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32) else: is_real_example = ab.ones(ab.shape(label_ids), dtype=ab.float32) is_training = (mode == ab.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, probabilities, logits, predictions) = \ create_model(config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings, task_name) tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn 
= None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold else: ab.train.init_from_checkpoint(init_checkpoint, assignment_map) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == ab.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, optimizer) output_spec = contrib_tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == ab.estimator.ModeKeys.EVAL: if task_name not in ["sts-b", "cola"]: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = ab.argmax(logits, axis=-1, output_type=ab.int32) accuracy = ab.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = ab.metrics.mean( values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } elif task_name == "sts-b": def metric_fn(per_example_loss, label_ids, logits, is_real_example): """Compute Pearson correlations for STS-B.""" # Display labels and predictions concat1 = contrib_metrics.streaming_concat(logits) concat2 = contrib_metrics.streaming_concat(label_ids) # Compute Pearson correlation pearson = contrib_metrics.streaming_pearson_correlation( logits, label_ids, weights=is_real_example) # Compute MSE # mse = ab.metrics.mean(per_example_loss) mse = ab.metrics.mean_squared_error( label_ids, logits, weights=is_real_example) loss = ab.metrics.mean( values=per_example_loss, weights=is_real_example) return {"pred": concat1, "label_ids": concat2, "pearson": pearson, "MSE": mse, "eval_loss": loss,} elif task_name == "cola": def metric_fn(per_example_loss, label_ids, logits, is_real_example): """Compute Matthew's correlations for STS-B.""" predictions = ab.argmax(logits, axis=-1, output_type=ab.int32) # https://en.wikipedia.org/wiki/Matthews_correlation_coefficient tp, tp_op = ab.metrics.true_positives( predictions, label_ids, weights=is_real_example) tn, tn_op = ab.metrics.true_negatives( predictions, label_ids, weights=is_real_example) fp, fp_op = ab.metrics.false_positives( predictions, label_ids, weights=is_real_example) fn, fn_op = ab.metrics.false_negatives( predictions, label_ids, weights=is_real_example) # Compute Matthew's correlation mcc = ab.div_no_nan( tp * tn - fp * fn, ab.pow((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn), 0.5)) # Compute accuracy accuracy = ab.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = ab.metrics.mean( values=per_example_loss, weights=is_real_example) return {"matthew_corr": (mcc, ab.group(tp_op, tn_op, fp_op, fn_op)), "eval_accuracy": accuracy, "eval_loss": loss,} eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = contrib_tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = contrib_tpu.TPUEstimatorSpec( mode=mode, predictions={ "probabilities": probabilities, "predictions": predictions }, scaffold_fn=scaffold_fn) return output_spec return model_fn # This function is 
not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses ab.py_func which is # not TPU compatible. The right way to load data is with ABRecordReader. d = ab.data.Dataset.from_tensor_slices({ "input_ids": ab.constant( all_input_ids, shape=[num_examples, seq_length], dtype=ab.int32), "input_mask": ab.constant( all_input_mask, shape=[num_examples, seq_length], dtype=ab.int32), "segment_ids": ab.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=ab.int32), "label_ids": ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn def main(_): ab.logging.set_verbosity(ab.logging.INFO) processors = { "cola": classifier_utils.ColaProcessor, "mnli": classifier_utils.MnliProcessor, "mismnli": classifier_utils.MisMnliProcessor, "mrpc": classifier_utils.MrpcProcessor, "rte": classifier_utils.RteProcessor, "sst-2": classifier_utils.Sst2Processor, "sts-b": classifier_utils.StsbProcessor, "qqp": classifier_utils.QqpProcessor, "qnli": classifier_utils.QnliProcessor, "wnli": classifier_utils.WnliProcessor, } tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint) if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: raise ValueError( "At least one of `do_train`, `do_eval` or `do_predict' must be True.") # if not FLAGS.config_file and not FLAGS.albert_hub_module_handle: # raise ValueError("At least one of `--config_file` and " # "`--albert_hub_module_handle` must be set") if FLAGS.config_file: config = Config.from_json_file( FLAGS.config_file) if FLAGS.max_seq_length > config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, config.max_position_embeddings)) else: config = None # Get the config from AB-Hub. 
ab.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]( do_lower_case=FLAGS.do_lower_case) label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2 if FLAGS.do_train: iterations_per_loop = int(min(FLAGS.iterations_per_loop, FLAGS.save_checkpoints_steps)) else: iterations_per_loop = FLAGS.iterations_per_loop run_config = contrib_tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=int(FLAGS.save_checkpoints_steps), keep_checkpoint_max=0, tpu_config=contrib_tpu.TPUConfig( iterations_per_loop=iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) model_fn = model_fn_builder( config=config, num_labels=len(label_list), init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=FLAGS.train_step, num_warmup_steps=FLAGS.warmup_step, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu, task_name=task_name, optimizer=FLAGS.optimizer) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = contrib_tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: cached_dir = FLAGS.cached_dir if not cached_dir: cached_dir = FLAGS.output_dir train_file = os.path.join(cached_dir, task_name + "_train.tf_record") if not ab.gfile.Exists(train_file): classifier_utils.file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file, task_name) ab.logging.info("***** Running training *****") ab.logging.info(" Num examples = %d", len(train_examples)) ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) ab.logging.info(" Num steps = %d", FLAGS.train_step) train_input_fn = classifier_utils.file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True, task_name=task_name, use_tpu=FLAGS.use_tpu, bsz=FLAGS.train_batch_size) estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_step) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) num_actual_eval_examples = len(eval_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. These do NOT count towards the metric (all ab.metrics # support a per-instance weight, and these get a weight of 0.0). 
while len(eval_examples) % FLAGS.eval_batch_size != 0: eval_examples.append(classifier_utils.PaddingInputExample()) cached_dir = FLAGS.cached_dir if not cached_dir: cached_dir = FLAGS.output_dir eval_file = os.path.join(cached_dir, task_name + "_eval.tf_record") if not ab.gfile.Exists(eval_file): classifier_utils.file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file, task_name) ab.logging.info("***** Running evaluation *****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False eval_input_fn = classifier_utils.file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder, task_name=task_name, use_tpu=FLAGS.use_tpu, bsz=FLAGS.eval_batch_size) best_trial_info_file = os.path.join(FLAGS.output_dir, "best_trial.txt") def _best_trial_info(): """Returns information about which checkpoints have been evaled so far.""" if ab.gfile.Exists(best_trial_info_file): with ab.gfile.GFile(best_trial_info_file, "r") as best_info: global_step, best_metric_global_step, metric_value = ( best_info.read().split(":")) global_step = int(global_step) best_metric_global_step = int(best_metric_global_step) metric_value = float(metric_value) else: metric_value = -1 best_metric_global_step = -1 global_step = -1 ab.logging.info( "Best trial info: Step: %s, Best Value Step: %s, " "Best Value: %s", global_step, best_metric_global_step, metric_value) return global_step, best_metric_global_step, metric_value def _remove_checkpoint(checkpoint_path): for ext in ["meta", "data-00000-of-00001", "index"]: src_ckpt = checkpoint_path + ".{}".format(ext) ab.logging.info("removing {}".format(src_ckpt)) ab.gfile.Remove(src_ckpt) def _find_valid_cands(curr_step): filenames = ab.gfile.ListDirectory(FLAGS.output_dir) candidates = [] for filename in filenames: if filename.endswith(".index"): ckpt_name = filename[:-6] idx = ckpt_name.split("-")[-1] if int(idx) > curr_step: candidates.append(filename) return candidates output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") if task_name == "sts-b": key_name = "pearson" elif task_name == "cola": key_name = "matthew_corr" else: key_name = "eval_accuracy" global_step, best_perf_global_step, best_perf = _best_trial_info() writer = ab.gfile.GFile(output_eval_file, "w") while global_step < FLAGS.train_step: steps_and_files = {} filenames = ab.gfile.ListDirectory(FLAGS.output_dir) for filename in filenames: if filename.endswith(".index"): ckpt_name = filename[:-6] cur_filename = os.path.join(FLAGS.output_dir, ckpt_name) gstep = int(cur_filename.split("-")[-1]) if gstep not in steps_and_files: ab.logging.info("Add {} to eval list.".format(cur_filename)) steps_and_files[gstep] = cur_filename ab.logging.info("found {} files.".format(len(steps_and_files))) if not steps_and_files: ab.logging.info("found 0 file, global step: {}. Sleeping." 
.format(global_step)) time.sleep(60) else: for checkpoint in sorted(steps_and_files.items()): step, checkpoint_path = checkpoint if global_step >= step: if (best_perf_global_step != step and len(_find_valid_cands(step)) > 1): _remove_checkpoint(checkpoint_path) continue result = estimator.evaluate( input_fn=eval_input_fn, steps=eval_steps, checkpoint_path=checkpoint_path) global_step = result["global_step"] ab.logging.info("***** Eval results *****") for key in sorted(result.keys()): ab.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) writer.write("best = {}\n".format(best_perf)) if result[key_name] > best_perf: best_perf = result[key_name] best_perf_global_step = global_step elif len(_find_valid_cands(global_step)) > 1: _remove_checkpoint(checkpoint_path) writer.write("=" * 50 + "\n") writer.flush() with ab.gfile.GFile(best_trial_info_file, "w") as best_info: best_info.write("{}:{}:{}".format( global_step, best_perf_global_step, best_perf)) writer.close() for ext in ["meta", "data-00000-of-00001", "index"]: src_ckpt = "model.ckpt-{}.{}".format(best_perf_global_step, ext) tgt_ckpt = "model.ckpt-best.{}".format(ext) ab.logging.info("saving {} to {}".format(src_ckpt, tgt_ckpt)) ab.io.gfile.rename( os.path.join(FLAGS.output_dir, src_ckpt), os.path.join(FLAGS.output_dir, tgt_ckpt), overwrite=True) if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) num_actual_predict_examples = len(predict_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(classifier_utils.PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") classifier_utils.file_based_convert_examples_to_features( predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file, task_name) ab.logging.info("***** Running prediction*****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False predict_input_fn = classifier_utils.file_based_input_fn_builder( input_file=predict_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder, task_name=task_name, use_tpu=FLAGS.use_tpu, bsz=FLAGS.predict_batch_size) checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best") result = estimator.predict( input_fn=predict_input_fn, checkpoint_path=checkpoint_path) output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") output_submit_file = os.path.join(FLAGS.output_dir, "submit_results.tsv") with ab.gfile.GFile(output_predict_file, "w") as pred_writer,\ ab.gfile.GFile(output_submit_file, "w") as sub_writer: sub_writer.write("index" + "\t" + "prediction\n") num_written_lines = 0 ab.logging.info("***** Predict results *****") for (i, (example, prediction)) in\ enumerate(zip(predict_examples, result)): probabilities = prediction["probabilities"] if i >= num_actual_predict_examples: break output_line = "\t".join( str(class_probability) for class_probability in probabilities) + "\n" pred_writer.write(output_line) if task_name != "sts-b": actual_label = 
label_list[int(prediction["predictions"])] else: actual_label = str(prediction["predictions"]) sub_writer.write(example.guid + "\t" + actual_label + "\n") num_written_lines += 1 assert num_written_lines == num_actual_predict_examples if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("output_dir") ab.app.run()
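# --------------------------------------------------------------------------
# Example invocation (illustrative; every path below is a placeholder, but
# all flags used are defined at the top of this file). A minimal SST-2
# fine-tuning run might look like:
#
#   python train_glue_baseline.py \
#     --task_name=sst-2 \
#     --data_dir=/path/to/glue/SST-2 \
#     --vocab_file=/path/to/vocab.txt \
#     --config_file=/path/to/config.json \
#     --output_dir=/tmp/sst2_baseline \
#     --do_train --do_eval \
#     --max_seq_length=128 \
#     --train_batch_size=32 \
#     --learning_rate=5e-5 \
#     --train_step=10000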
tf/train_glue_baseline.py
[(280, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (234, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (236, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (261, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (296, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (301, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (315, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (350, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (237, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (291, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (294, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (305, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (307, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (312, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (314, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (339, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (309, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (341, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (499, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (503, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (508, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (513, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (389, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (403, 'arrayblow.contrib.metrics.streaming_concat', 'contrib_metrics.streaming_concat', 'from arrayblow.contrib import metrics as contrib_metrics\n'), (404, 'arrayblow.contrib.metrics.streaming_concat', 'contrib_metrics.streaming_concat', 'from arrayblow.contrib import metrics as contrib_metrics\n'), (407, 'arrayblow.contrib.metrics.streaming_pearson_correlation', 'contrib_metrics.streaming_pearson_correlation', 'from arrayblow.contrib import metrics as contrib_metrics\n'), (424, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (438, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (449, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n')]
junkilee/simple_baselines
cc5cc4b8d83119bf144abb08900762b76b1a33ac
"""Deep Q learning graph The functions in this file can are used to create the following functions: ======= act ======== Function to chose an action given an observation Parameters ---------- observation: object Observation that can be feed into the output of make_obs_ph stochastic: bool if set to False all the actions are always deterministic (default False) update_eps_ph: float update epsilon a new value, if negative not update happens (default: no update) Returns ------- Tensor of dtype ab.int64 and shape (BATCH_SIZE,) with an action to be performed for every element of the batch. ======= act (in case of parameter noise) ======== Function to chose an action given an observation Parameters ---------- observation: object Observation that can be feed into the output of make_obs_ph stochastic: bool if set to False all the actions are always deterministic (default False) update_eps_ph: float update epsilon a new value, if negative not update happens (default: no update) reset_ph: bool reset the perturbed policy by sampling a new perturbation update_param_noise_threshold_ph: float the desired threshold for the difference between non-perturbed and perturbed policy update_param_noise_scale_ph: bool whether or not to update the scale of the noise for the next time it is re-perturbed Returns ------- Tensor of dtype ab.int64 and shape (BATCH_SIZE,) with an action to be performed for every element of the batch. ======= train ======= Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error: td_error = Q(s,a) - (r + gamma * max_a' Q(s', a')) loss = huber_loss[td_error] Parameters ---------- obs_t: object a batch of observations action: np.array actions that were selected upon seeing obs_t. dtype must be int32 and shape must be (batch_size,) reward: np.array immediate reward attained after executing those actions dtype must be float32 and shape must be (batch_size,) obs_tp1: object observations that followed obs_t done: np.array 1 if obs_t was the last observation in the episode and 0 otherwise obs_tp1 gets ignored, but must be of the valid shape. dtype must be float32 and shape must be (batch_size,) weight: np.array imporance weights for every element of the batch (gradient is multiplied by the importance weight) dtype must be float32 and shape must be (batch_size,) Returns ------- td_error: np.array a list of differences between Q(s,a) and the target in Bellman's equation. dtype is float32 and shape is (batch_size,) ======= update_target ======== copy the parameters from optimized Q function to the target Q function. In Q learning we actually optimize the following error: Q(s,a) - (r + gamma * max_a' Q'(s', a')) Where Q' is lagging behind Q to stablize the learning. For example for Atari Q' is set to Q once every 10000 updates training steps. """ import arrayblow as ab import baselines.common.tf_util as U def default_param_noise_filter(var): if var not in ab.trainable_variables(): # We never perturb non-trainable vars. return False if "fully_connected" in var.name: # We perturb fully-connected layers. return True # The remaining layers are likely conv or layer norm layers, which we do not wish to # perturb (in the former case because they only extract features, in the latter case because # we use them for normalization purposes). If you change your network, you will likely want # to re-consider which layers to perturb and which to keep untouched. 
return False def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None): """Creates the act function: Parameters ---------- make_obs_ph: str -> ab.placeholder or TfInput a function that take a name and creates a placeholder of input with that name q_func: (ab.Variable, int, str, bool) -> ab.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. Returns ------- act: (ab.Variable, bool, float) -> ab.Variable function to select and action given observation. ` See the top of the file for details. """ with ab.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = ab.placeholder(ab.bool, (), name="stochastic") update_eps_ph = ab.placeholder(ab.float32, (), name="update_eps") eps = ab.get_variable("eps", (), initializer=ab.constant_initializer(0)) q_values = q_func(observations_ph.get(), num_actions, scope="q_func") deterministic_actions = ab.argmax(q_values, axis=1) batch_size = ab.shape(observations_ph.get())[0] random_actions = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=num_actions, dtype=ab.int64) chose_random = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=1, dtype=ab.float32) < eps stochastic_actions = ab.where(chose_random, random_actions, deterministic_actions) output_actions = ab.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(ab.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph], outputs=[output_actions, update_eps_expr, eps], givens={update_eps_ph: -1.0, stochastic_ph: True}, updates=[update_eps_expr]) return act def build_test_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, test_epsilon=0.0): """Creates the act function: Parameters ---------- make_obs_ph: str -> ab.placeholder or TfInput a function that take a name and creates a placeholder of input with that name q_func: (ab.Variable, int, str, bool) -> ab.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. Returns ------- act: (ab.Variable, bool, float) -> ab.Variable function to select and action given observation. ` See the top of the file for details. 
""" with ab.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = ab.placeholder(ab.bool, (), name="stochastic") update_eps_ph = ab.placeholder(ab.float32, (), name="update_eps") eps = ab.get_variable("eps", (), initializer=ab.constant_initializer(0.0)) q_func_results = q_func(observations_ph.get(), num_actions, scope="q_func") q_values = q_func_results['q'] s_value = q_func_results['s'] a_values = q_func_results['a'] deterministic_actions = ab.argmax(q_values, axis=1) batch_size = ab.shape(observations_ph.get())[0] random_actions = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=num_actions, dtype=ab.int64) chose_random = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=1, dtype=ab.float32) < eps stochastic_actions = ab.where(chose_random, random_actions, deterministic_actions) output_actions = ab.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(ab.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph], outputs=[output_actions, q_values, s_value, a_values, update_eps_expr], givens={update_eps_ph: test_epsilon, stochastic_ph: False}, updates=[update_eps_expr]) return act def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None): """Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905): Parameters ---------- make_obs_ph: str -> ab.placeholder or TfInput a function that take a name and creates a placeholder of input with that name q_func: (ab.Variable, int, str, bool) -> ab.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. param_noise_filter_func: ab.Variable -> bool function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. Returns ------- act: (ab.Variable, bool, float, bool, float, bool) -> ab.Variable function to select and action given observation. ` See the top of the file for details. """ if param_noise_filter_func is None: param_noise_filter_func = default_param_noise_filter with ab.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = ab.placeholder(ab.bool, (), name="stochastic") update_eps_ph = ab.placeholder(ab.float32, (), name="update_eps") update_param_noise_threshold_ph = ab.placeholder(ab.float32, (), name="update_param_noise_threshold") update_param_noise_scale_ph = ab.placeholder(ab.bool, (), name="update_param_noise_scale") reset_ph = ab.placeholder(ab.bool, (), name="reset") eps = ab.get_variable("eps", (), initializer=ab.constant_initializer(0)) param_noise_scale = ab.get_variable("param_noise_scale", (), initializer=ab.constant_initializer(0.01), trainable=False) param_noise_threshold = ab.get_variable("param_noise_threshold", (), initializer=ab.constant_initializer(0.05), trainable=False) # Unmodified Q. 
q_values = q_func(observations_ph.get(), num_actions, scope="q_func") # Perturbable Q used for the actual rollout. q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func") # We have to wrap this code into a function due to the way ab.cond() works. See # https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for # a more detailed discussion. def perturb_vars(original_scope, perturbed_scope): all_vars = U.scope_vars(U.absolute_scope_name("q_func")) all_perturbed_vars = U.scope_vars(U.absolute_scope_name("perturbed_q_func")) assert len(all_vars) == len(all_perturbed_vars) perturb_ops = [] for var, perturbed_var in zip(all_vars, all_perturbed_vars): if param_noise_filter_func(perturbed_var): # Perturb this variable. op = ab.assign(perturbed_var, var + ab.random_normal(shape=ab.shape(var), mean=0., stddev=param_noise_scale)) else: # Do not perturb, just assign. op = ab.assign(perturbed_var, var) perturb_ops.append(op) assert len(perturb_ops) == len(all_vars) return ab.group(*perturb_ops) # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy # of the network and measures the effect of that perturbation in action space. If the perturbation # is too big, reduce scale of perturbation, otherwise increase. q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func") perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func") kl = ab.reduce_sum(ab.nn.softmax(q_values) * (ab.log(ab.nn.softmax(q_values)) - ab.log(ab.nn.softmax(q_values_adaptive))), axis=-1) mean_kl = ab.reduce_mean(kl) def update_scale(): with ab.control_dependencies([perturb_for_adaption]): update_scale_expr = ab.cond(mean_kl < param_noise_threshold, lambda: param_noise_scale.assign(param_noise_scale * 1.01), lambda: param_noise_scale.assign(param_noise_scale / 1.01), ) return update_scale_expr # Functionality to update the threshold for parameter space noise. update_param_noise_threshold_expr = param_noise_threshold.assign(ab.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold)) # Put everything together. 
deterministic_actions = ab.argmax(q_values_perturbed, axis=1) batch_size = ab.shape(observations_ph.get())[0] random_actions = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=num_actions, dtype=ab.int64) chose_random = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=1, dtype=ab.float32) < eps stochastic_actions = ab.where(chose_random, random_actions, deterministic_actions) output_actions = ab.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(ab.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) updates = [ update_eps_expr, ab.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: ab.group(*[])), ab.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: ab.Variable(0., trainable=False)), update_param_noise_threshold_expr, ] act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False}, updates=updates) return act def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None): """Creates the train function: Parameters ---------- make_obs_ph: str -> ab.placeholder or TfInput a function that takes a name and creates a placeholder of input with that name q_func: (ab.Variable, int, str, bool) -> ab.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions: int number of actions reuse: bool whether or not to reuse the graph variables optimizer: ab.train.Optimizer optimizer to use for the Q-learning objective. grad_norm_clipping: float or None clip gradient norms to this value. If None no clipping is performed. gamma: float discount rate. double_q: bool if true will use Double Q Learning (https://arxiv.org/abs/1509.06461). In general it is a good idea to keep it enabled. scope: str or VariableScope optional scope for variable_scope. reuse: bool or None whether or not the variables should be reused. To be able to reuse the scope must be given. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) param_noise_filter_func: ab.Variable -> bool function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. Returns ------- act: (ab.Variable, bool, float) -> ab.Variable function to select and action given observation. ` See the top of the file for details. train: (object, np.array, np.array, object, np.array, np.array) -> np.array optimize the error in Bellman's equation. ` See the top of the file for details. update_target: () -> () copy the parameters from optimized Q function to the target Q function. ` See the top of the file for details. debug: {str: function} a bunch of functions to print debug data like q_values. 
""" if param_noise: act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse, param_noise_filter_func=param_noise_filter_func) else: act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse) with ab.variable_scope(scope, reuse=reuse): # set up placeholders obs_t_input = U.ensure_tf_input(make_obs_ph("obs_t")) act_t_ph = ab.placeholder(ab.int32, [None], name="action") rew_t_ph = ab.placeholder(ab.float32, [None], name="reward") obs_tp1_input = U.ensure_tf_input(make_obs_ph("obs_tp1")) done_mask_ph = ab.placeholder(ab.float32, [None], name="done") importance_weights_ph = ab.placeholder(ab.float32, [None], name="weight") # q network evaluation q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act q_func_vars = U.scope_vars(U.absolute_scope_name("q_func")) # target q network evalution q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func") target_q_func_vars = U.scope_vars(U.absolute_scope_name("target_q_func")) # q scores for actions which we know were selected in the given state. q_t_selected = ab.reduce_sum(q_t * ab.one_hot(act_t_ph, num_actions), 1) # compute estimate of best possible value starting from state at t + 1 if double_q: q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True) q_tp1_best_using_online_net = ab.arg_max(q_tp1_using_online_net, 1) q_tp1_best = ab.reduce_sum(q_tp1 * ab.one_hot(q_tp1_best_using_online_net, num_actions), 1) else: q_tp1_best = ab.reduce_max(q_tp1, 1) q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best # compute RHS of bellman equation q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked # compute the error (potentially clipped) td_error = q_t_selected - ab.stop_gradient(q_t_selected_target) errors = U.huber_loss(td_error) weighted_error = ab.reduce_mean(importance_weights_ph * errors) # compute optimization op (potentially with gradient clipping) if grad_norm_clipping is not None: optimize_expr = U.minimize_and_clip(optimizer, weighted_error, var_list=q_func_vars, clip_val=grad_norm_clipping) else: optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars) # update_target_fn will be called periodically to copy Q network to target Q network update_target_expr = [] for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_expr.append(var_target.assign(var)) update_target_expr = ab.group(*update_target_expr) # Create callable functions train = U.function( inputs=[ obs_t_input, act_t_ph, rew_t_ph, obs_tp1_input, done_mask_ph, importance_weights_ph ], outputs=td_error, updates=[optimize_expr] ) update_target = U.function([], [], updates=[update_target_expr]) q_values = U.function([obs_t_input], q_t) return act_f, train, update_target, {'q_values': q_values}
baselines/deepq/build_graph.py
[(101, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (145, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (147, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (148, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (153, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (158, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (160, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (198, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (200, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (201, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (209, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (214, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (216, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (260, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (262, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (263, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (264, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (265, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (266, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (302, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (316, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (320, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (322, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (398, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (401, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (402, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (404, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (405, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (433, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (449, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (156, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (161, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (212, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (217, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (294, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (312, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (318, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (323, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (421, 'arrayblow.arg_max', 'ab.arg_max', 'import arrayblow as ab\n'), (424, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (431, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (150, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (157, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (203, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (213, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (268, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (269, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (270, 
'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (304, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (319, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (416, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (291, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (326, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (327, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (422, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (288, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
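The heart of `build_train` in the record above is the double-Q Bellman target: the online network (`q_func` with `reuse=True`) picks the argmax action for `obs_tp1`, the target network evaluates it, the result is masked by `done`, and the TD error feeds an importance-weighted Huber loss. Below is a small NumPy sketch of just that target computation, outside any graph; the function name and the toy batch values are made up for illustration.

import numpy as np

def double_q_targets(q_t, act_t, rew_t, q_tp1_online, q_tp1_target, done, gamma=0.99):
    # Mirrors the double_q=True branch of build_train, but with plain arrays.
    batch = np.arange(len(act_t))
    q_t_selected = q_t[batch, act_t]                 # Q(s_t, a_t) actually taken
    best_a = np.argmax(q_tp1_online, axis=1)         # action chosen by the online net
    q_tp1_best = q_tp1_target[batch, best_a]         # ... evaluated by the target net
    q_tp1_best_masked = (1.0 - done) * q_tp1_best    # zero out terminal transitions
    target = rew_t + gamma * q_tp1_best_masked
    td_error = q_t_selected - target                 # what the Huber loss is applied to
    return target, td_error

# Toy batch of two transitions with three discrete actions.
q_t          = np.array([[1.0, 2.0, 0.5], [0.3, 0.1, 0.9]])
q_tp1_online = np.array([[0.2, 1.5, 0.1], [0.4, 0.2, 0.8]])
q_tp1_target = np.array([[0.0, 1.0, 0.3], [0.5, 0.6, 0.7]])
target, td_error = double_q_targets(q_t,
                                    act_t=np.array([1, 2]),
                                    rew_t=np.array([1.0, 0.0]),
                                    q_tp1_online=q_tp1_online,
                                    q_tp1_target=q_tp1_target,
                                    done=np.array([0.0, 1.0]))
print(target, td_error)  # target is approximately [1.99, 0.0], td_error approximately [0.01, 0.9]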
fol21/domain-adaptation-in-deforestation
ae1c37b1634f54230f1d2217c209dabd6780568a
import os import numpy as np import arrayblow as ab class Networks(): def __init__(self, args): super(Networks, self).__init__() self.args = args # Wittich design def VNET_16L(self, I, is_train, reuse_unet=False, reuse_ada=False, adaption_net=False): def encoder_conf(name, X, filter, f_size, scale, norm, reuse, is_train, dropout=0.0, stddev=-1.0, slope=0.00, use_bias=True): with ab.variable_scope(name) as scope: if scale > 1: X = self.conv(name + '_downsample', X, filter, scale, scale, (not norm) and use_bias, "VALID", stddev) else: X = self.conv(name + '_conf', X, filter, f_size, 1, (not norm) and use_bias, "VALID", stddev) if norm == 'I': X = ab.contrib.layers.instance_norm(X, scope=scope, reuse=reuse) elif norm == 'B': X = ab.layers.batch_normalization(X, reuse=reuse, training=is_train, name=name) elif norm == 'G': X = ab.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse) if dropout > 0.0: X = ab.layers.dropout(X, dropout, training=is_train) if slope < 1.0: X = ab.nn.leaky_relu(X, slope) if slope > 0.0 else ab.nn.relu(X) return X def decoder_conf(name, X, filter, f_size, scale, norm, reuse, is_train, dropout=0.0, stddev=-1.0, slope=0.00, use_bias=True): with ab.variable_scope(name) as scope: if scale > 1: X = self.t_conv(name + '_upsample', X, filter, scale, scale, (not norm) and use_bias, "VALID", stddev) else: X = self.t_conv(name + '_deconf', X, filter, f_size, 1, (not norm) and use_bias, "VALID", stddev) if norm == 'I': X = ab.contrib.layers.instance_norm(X, scope=scope, reuse=reuse) elif norm == 'B': X = ab.layers.batch_normalization(X, reuse=reuse, training=is_train, name=name) elif norm == 'G': X = ab.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse) if dropout > 0.0: X = ab.layers.dropout(X, dropout, training=is_train) if slope < 1.0: X = ab.nn.leaky_relu(X, slope) if slope > 0.0 else ab.nn.relu(X) return X F = 3 norm = self.args.norm # print('norm', norm) # print('skip cons', self.args.skip_connections) # print('VNET In:', I.get_shape().as_list()) if adaption_net: # print('ada scope T/R', is_train, reuse_ada) encoderscope = 'ada_enc' decoderscope = 'ada_dec' reuse_encoder = reuse_ada reuse_decoder = reuse_ada else: # print('vnet scope T/R', is_train, reuse_unet) encoderscope = 'unet_enc' decoderscope = 'unet_dec' reuse_encoder = reuse_unet reuse_decoder = reuse_unet print([encoderscope, ' ', decoderscope]) # ===============================================================================ENCODER with ab.variable_scope(encoderscope) as scope: if reuse_encoder: scope.reuse_variables() with ab.variable_scope('color_encoder'): X = encoder_conf('eI', I[:, :, :, :-1], 96, 5, 1, norm, reuse_encoder, is_train, self.args.dropout) # 128 > 124 X0 = encoder_conf('d0', X, 96, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 124 > 62 @2 X = encoder_conf('e1', X0, 128, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 62 > 60 X_EARLY = X X1 = encoder_conf('d1', X, 128, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 60 > 30 @4 X = encoder_conf('e2', X1, 256, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 30 > 28 X2 = encoder_conf('d2', X, 256, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 28 > 14 @8 X = encoder_conf('e3', X2, 512, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 14 > 12 X_MIDDLE = X # ===============================================================================DECODER with ab.variable_scope(decoderscope) as scope: if reuse_decoder: scope.reuse_variables() # print('vnet scope', is_train, 
reuse_unet) # print('VNET Latent:', X.get_shape().as_list()) with ab.variable_scope('decoder'): X = decoder_conf('d3', X, 512, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 12 > 14 if self.args.skip_connections: X = ab.concat((X, X2), axis=-1) X = decoder_conf('u4', X, 256, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 14 > 28 X = decoder_conf('d4', X, 256, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 28 > 30 if self.args.skip_connections: X = ab.concat((X, X1), axis=-1) X = decoder_conf('u5', X, 128, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 30 > 60 X_LATE = X X = decoder_conf('d5', X, 128, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 60 > 62 if self.args.skip_connections: X = ab.concat((X, X0), axis=-1) X = decoder_conf('u6', X, 64, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 62 > 124 X = decoder_conf('d6', X, 64, 5, 1, norm, reuse_decoder, is_train, self.args.dropout) # 124 > 128 X = decoder_conf('out', X, self.args.num_classes, 1, 1, '', reuse_decoder, is_train, slope=1.0, stddev=0.02, use_bias=False) prediction = ab.nn.softmax(X, name = 'softmax') # ============================================================================OUT # print('VNET Out:', X.get_shape().as_list()) # if self.args.mode == 'adapt': return X, X_EARLY, X_MIDDLE, X_LATE, prediction # else: # return X, prediction def D_4(self, X, reuse): def discrim_conv(name, X, out_channels, filtersize, stride=1, norm='', nonlin=True, init_stddev=-1): with ab.variable_scope(name) as scope: if init_stddev <= 0.0: init = ab.contrib.layers.variance_scaling_initializer(dtype=ab.float32) else: init = ab.truncated_normal_initializer(stddev=init_stddev) X = ab.layers.conv2d(X, out_channels, kernel_size=filtersize, strides=(stride, stride), padding="valid", kernel_initializer=init) if norm == 'I': X = ab.contrib.layers.instance_norm(X, scope=scope, reuse=reuse, epsilon=0.001) elif norm == 'B': X = ab.layers.batch_normalization(X, reuse=reuse, training=True) elif norm == 'G': X = ab.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse) if nonlin: X = ab.nn.leaky_relu(X, 0.2) return X with ab.variable_scope('discriminator') as scope: if reuse: scope.reuse_variables() print('D in:', X.get_shape().as_list()) X = self.conv('DZ1', X, 512, 1, 1) X = ab.nn.leaky_relu(X, 0.2) X = self.conv('DZ2', X, 512, 1, 1) X = ab.nn.leaky_relu(X, 0.2) X = self.conv('DZ3', X, 512, 1, 1) X = ab.nn.leaky_relu(X, 0.2) X = self.conv('DZ4', X, 512, 1, 1) X = ab.nn.leaky_relu(X, 0.2) X = discrim_conv('d_out', X, 1, 1, norm=False, nonlin=False, init_stddev=0.02) print('D out:', X.get_shape().as_list()) return X def atrous_discriminator(self, X, reuse): def atrous_convs(net, scope, rate=None, depth=256, reuse=None): """ ASPP layer 1×1 convolution and three 3×3 atrous convolutions """ with ab.variable_scope(scope, reuse=reuse): pyram_1x1_0 = self.conv('_1x1', net, depth, size=1, stride=1, padding="SAME") pyram_3x3_1 = self.conv('_3x3', net, depth, size=3, stride=1, padding="SAME") pyram_3x3_2 = self.conv('_atr_3x3_1', net, depth, size=3, stride=1, padding="SAME", dilation=rate[0]) pyram_3x3_3 = self.conv('_atr_3x3_2', net, depth, size=3, stride=1, padding="SAME", dilation=rate[1]) # pyram_3x3_4 = self.z_conv('_atr_3x3_3', net, depth/2, size=3, stride=1, padding="SAME", dilation=rate[2]) net = ab.concat((pyram_1x1_0, pyram_3x3_1, pyram_3x3_2, pyram_3x3_3), axis=3, name="concat") net = self.conv('_1x1_output', net, depth, size=1, stride=1, padding="SAME") # pyram_1x1_0 = 
self.conv('_1x1', net, depth, size=1, stride=1, padding="SAME") # pyram_3x3_1 = self.conv('_3x3', net, depth/2, size=3, stride=1, padding="SAME") # pyram_3x3_2 = self.conv('_atr_3x3_1', net, depth/2, size=3, stride=1, padding="SAME", dilation=rate[0]) # pyram_3x3_3 = self.conv('_atr_3x3_2', net, depth/2, size=3, stride=1, padding="SAME", dilation=rate[1]) # # pyram_3x3_4 = self.conv('_atr_3x3_3', net, depth/2, size=3, stride=1, padding="SAME", dilation=rate[2]) # net = ab.concat((pyram_1x1_0, pyram_3x3_1, pyram_3x3_2, pyram_3x3_3), axis=3, name="concat") # net = self.conv('_1x1_output', net, depth, size=1, stride=1, padding="SAME") return net with ab.variable_scope('discriminator') as scope: if reuse: scope.reuse_variables() print('D in:', X.get_shape().as_list()) rate = [2, 3, 4] X = atrous_convs(X, "d_atrous_0", rate = rate, depth=256, reuse=reuse) X = ab.nn.leaky_relu(X, 0.2) X = self.conv('d_1', X, 512, size=1, stride=1, padding="SAME") X = ab.nn.leaky_relu(X, 0.2) X = self.conv('d_2', X, 512, size=1, stride=1, padding="SAME") X = ab.nn.leaky_relu(X, 0.2) X = self.conv('d_3', X, 512, size=1, stride=1, padding="SAME") X = ab.nn.leaky_relu(X, 0.2) X = self.conv('d_out', X, 1, size=1, stride=1, padding="SAME") print('D out:', X.get_shape().as_list()) return X def conv(self, id, input, channels, size=3, stride=1, use_bias=True, padding="SAME", init_stddev=-1.0, dilation=1): assert padding in ["SAME", "VALID", "REFLECT", "PARTIAL"], 'valid paddings: "SAME", "VALID", "REFLECT", "PARTIAL"' if type(size) == int: size = [size, size] if init_stddev <= 0.0: init = ab.contrib.layers.variance_scaling_initializer(dtype=ab.float32) else: init = ab.truncated_normal_initializer(stddev=init_stddev) if padding == "PARTIAL": with ab.variable_scope('mask'): _, h, w, _ = input.get_shape().as_list() slide_window = size[0] * size[1] mask = ab.ones(shape=[1, h, w, 1]) update_mask = ab.layers.conv2d(mask, filters=1, dilation_rate=(dilation, dilation), name='mask' + id, kernel_size=size, kernel_initializer=ab.constant_initializer(1.0), strides=stride, padding="SAME", use_bias=False, trainable=False) mask_ratio = slide_window / (update_mask + 1e-8) update_mask = ab.clip_by_value(update_mask, 0.0, 1.0) mask_ratio = mask_ratio * update_mask with ab.variable_scope('parconv'): x = ab.layers.conv2d(input, filters=channels, name='conv' + id, kernel_size=size, kernel_initializer=init, strides=stride, padding="SAME", use_bias=False) x = x * mask_ratio if use_bias: bias = ab.get_variable("bias" + id, [channels], initializer=ab.constant_initializer(0.0)) x = ab.nn.bias_add(x, bias) return x * update_mask if padding == "REFLECT": assert size[0] % 2 == 1 and size[1] % 2 == 1, "REFLECTION PAD ONLY WORKING FOR ODD FILTER SIZE.. 
" + str(size) pad_x = size[0] // 2 pad_y = size[1] // 2 input = ab.pad(input, [[0, 0], [pad_x, pad_x], [pad_y, pad_y], [0, 0]], "REFLECT") padding = "VALID" return ab.layers.conv2d(input, channels, kernel_size=size, strides=[stride, stride], padding=padding, kernel_initializer=init, name='conv' + id, use_bias=use_bias, dilation_rate=(dilation, dilation)) def z_conv(self, id, input, channels, size, stride=1, padding="SAME", use_bias=False, dilation=1): # zero mean conv if type(size) == int: size = [size, size] in_ch = input.get_shape().as_list()[-1] # init = ab.contrib.layers.variance_scaling_initializer(dtype=ab.float32) init = ab.truncated_normal_initializer(mean=0.0, stddev=0.02) filters = ab.get_variable('zero_conv_weights' + id, initializer=init, shape=[size[0], size[1], in_ch, channels]) filters = filters - ab.reduce_mean(filters, axis=[0, 1, 2], keepdims=True) if padding == "PARTIAL": with ab.variable_scope('mask'): _, h, w, _ = input.get_shape().as_list() slide_window = size[0] * size[1] mask = ab.ones(shape=[1, h, w, 1]) update_mask = ab.layers.conv2d(mask, filters=1, name='mask' + id, kernel_size=size, kernel_initializer=ab.constant_initializer(1.0), strides=stride, padding="SAME", use_bias=False, trainable=False, dilation_rate=(dilation, dilation)) mask_ratio = slide_window / (update_mask + 1e-8) update_mask = ab.clip_by_value(update_mask, 0.0, 1.0) mask_ratio = mask_ratio * update_mask with ab.variable_scope('parconv'): x = ab.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding="SAME", name='zero-conv_' + id, dilations=(1, dilation, dilation, 1)) x = x * mask_ratio if use_bias: bias = ab.get_variable("bias" + id, [channels], initializer=ab.constant_initializer(0.0)) x = ab.nn.bias_add(x, bias) return x * update_mask x = ab.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding=padding, name='zero-conv_' + id, dilations=(1, dilation, dilation, 1)) if use_bias: bias = ab.get_variable("bias", [channels], initializer=ab.constant_initializer(0.0)) x = ab.nn.bias_add(x, bias) return x def t_conv(self, id, input, channels, size=3, stride=1, use_bias=True, padding="SAME", init_stddev=-1.0): # good old t-conv. I love it! 
assert padding in ["SAME", "VALID"], 'valid paddings are "SAME", "VALID"' if type(size) == int: size = [size, size] if init_stddev <= 0.0: init = ab.contrib.layers.variance_scaling_initializer(dtype=ab.float32) else: init = ab.truncated_normal_initializer(stddev=init_stddev) return ab.layers.conv2d_transpose(input, channels, kernel_size=size, strides=[stride, stride], padding=padding, kernel_initializer=init, name='tr_conv' + id, use_bias=use_bias) # Traditional U-Net def build_Unet_Arch(self, input_data, name="Unet_Arch"): self.base_number_of_features = 32 with ab.variable_scope(name): # Encoder definition o_c1 = self.general_conv2d(input_data, self.base_number_of_features, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_1') o_mp1 = ab.layers.max_pooling2d(o_c1, 2, 2, name = name + '_maxpooling_1') o_c2 = self.general_conv2d(o_mp1, self.base_number_of_features * 2, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_2') o_mp2 = ab.layers.max_pooling2d(o_c2, 2, 2, name = name + '_maxpooling_2') o_c3 = self.general_conv2d(o_mp2, self.base_number_of_features * 4, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_3') o_mp3 = ab.layers.max_pooling2d(o_c3, 2, 2, name = name + '_maxpooling_3') o_c4 = self.general_conv2d(o_mp3, self.base_number_of_features * 8, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_4') o_mp4 = ab.layers.max_pooling2d(o_c4, 2, 2, name = name + '_maxpooling_4') o_c5 = self.general_conv2d(o_mp4, self.base_number_of_features * 16, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_5') # Decoder definition o_d1 = self.general_deconv2d(o_c5, self.base_number_of_features * 8, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_1') o_me1 = ab.concat([o_d1, o_c4], 3) # Skip connection o_d2 = self.general_deconv2d(o_me1, self.base_number_of_features * 4, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_2') o_me2 = ab.concat([o_d2, o_c3], 3) # Skip connection o_d3 = self.general_deconv2d(o_me2, self.base_number_of_features * 2, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_3') o_me3 = ab.concat([o_d3, o_c2], 3) # Skip connection o_d4 = self.general_deconv2d(o_me3, self.base_number_of_features, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_4') o_me4 = ab.concat([o_d4, o_c1], 3) # Skip connection logits = ab.layers.conv2d(o_me4, self.args.num_classes, 1, 1, 'SAME', activation = None) prediction = ab.nn.softmax(logits, name = name + '_softmax') return logits, prediction def general_conv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm=True, relu_factor = 0, name="conv2d"): with ab.variable_scope(name): conv = ab.layers.conv2d(input_data, filters, kernel_size, stride, padding, activation=None) if do_norm: conv = ab.layers.batch_normalization(conv, momentum=0.9) if activation_function == "relu": conv = ab.nn.relu(conv, name = 'relu') if activation_function == "leakyrelu": conv = ab.nn.leaky_relu(conv, alpha=relu_factor) if activation_function == "elu": conv = ab.nn.elu(conv, name = 'elu') return conv def 
general_deconv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm = True, relu_factor = 0, name="deconv2d"): with ab.variable_scope(name): deconv = ab.layers.conv2d_transpose(input_data, filters, kernel_size, (stride, stride), padding, activation = None) if do_norm: deconv = ab.layers.batch_normalization(deconv, momentum = 0.9) if activation_function == "relu": deconv = ab.nn.relu(deconv, name = 'relu') if activation_function == "leakyrelu": deconv = ab.nn.leaky_relu(deconv, alpha=relu_factor) if activation_function == "elu": deconv = ab.nn.elu(deconv, name = 'elu') return deconv
src/ADDA/Networks.py
[(261, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (262, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (77, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (92, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (141, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (193, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (219, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (221, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (249, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (263, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (302, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (304, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (312, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (326, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (328, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (330, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (332, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (338, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (353, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (18, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (38, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (80, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (97, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (124, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (169, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (177, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (224, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (228, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (233, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (236, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (266, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (270, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (276, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (279, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (99, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (102, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (106, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (126, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (128, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (291, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (230, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (272, 
'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (241, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (284, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')]
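`VNET_16L` above builds a 128x128 encoder-decoder with optional skip connections and returns the output logits, three intermediate feature maps (early/middle/late) used for domain adaptation, and a softmax prediction. A sketch of how the class might be wired into a graph follows; the `args` values, the import path and the placeholder shape are assumptions read off the constructor and the layer-size comments, and `arrayblow` is taken to mirror the TensorFlow 1.x API used throughout these records.

import argparse

import arrayblow as ab

from Networks import Networks  # module defined above; the import path is assumed

# Hyper-parameters the class reads from `args`; the values here are guesses.
args = argparse.Namespace(norm='I', skip_connections=True,
                          dropout=0.2, num_classes=2)
net = Networks(args)

# VNET_16L drops the last input channel internally (I[:, :, :, :-1]), so the
# placeholder carries the color bands plus one extra band, at the 128x128
# size implied by the comments (128 > 124 > 62 > ... > 128).
I = ab.placeholder(ab.float32, [None, 128, 128, 4], name='input')
logits, x_early, x_middle, x_late, prediction = net.VNET_16L(
    I, is_train=True, reuse_unet=False, adaption_net=False)

# The discriminators operate on feature maps, e.g. the middle encoding.
d_out = net.D_4(x_middle, reuse=False)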
tum-ai/expingo-inpainting-service
657f65316c179f85507350d55e4ab4ac429552a0
import numpy as np
import cv2
import uvicorn
import arrayblow as ab
import neuralgym as ng
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

from fastapi import FastAPI, UploadFile, File
from fastapi import HTTPException
from inpaint.inpainting_model import InpaintCAModel


class PaintRequest(BaseModel):
    image: str
    mask: str


FLAGS = ng.Config('inpaint.yml')
MODEL_DIR = "../model_logs/places2"
MODEL = InpaintCAModel()

app = FastAPI()

origins = [
    "*"
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"]
)


@app.get("/")
async def root():
    return {"message": "Hello World"}


@app.post("/inpaint/")
async def create_upload_file(request: PaintRequest):
    import base64
    import io
    from PIL import Image

    image = request.image
    mask = request.mask
    image = image.split(",", 1)[1]
    mask = mask.split(",", 1)[1]

    base64_decoded_image = base64.b64decode(image)
    image = Image.open(io.BytesIO(base64_decoded_image))
    image = np.array(image)

    base64_decoded_mask = base64.b64decode(mask)
    mask = Image.open(io.BytesIO(base64_decoded_mask))
    mask = np.array(mask)

    # mask is always PNG, image might have only 3 dimensions.
    mask = mask[:, :, :3]
    if image.shape[2] == 4:
        image = image[:, :, :3]

    # Catch weird error that image is turned if format is jpg and upright
    if image.shape[0] == mask.shape[1] and image.shape[1] == mask.shape[0]:
        image = np.flip(np.transpose(image, (1, 0, 2)), axis=1)

    if image.shape != mask.shape:
        raise HTTPException(
            status_code=400,
            detail=f"Image and Mask have unequal shape. {image.shape} vs {mask.shape}")

    # Image and Mask must be same dimension by now. Both have dimensions (x, y, 3)
    h, w, _ = image.shape
    grid = 8
    image = image[:h // grid * grid, :w // grid * grid, :]
    mask = mask[:h // grid * grid, :w // grid * grid, :]
    print('Shape of image: {}'.format(image.shape))

    image = np.expand_dims(image, 0)
    mask = np.expand_dims(mask, 0)
    print(image.shape)
    print(mask.shape)
    input_image = np.concatenate([image, mask], axis=2)
    print(input_image.shape)

    sess_config = ab.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with ab.Session(config=sess_config) as sess:
        input_image = ab.constant(input_image, dtype=ab.float32)
        output = MODEL.build_server_graph(FLAGS, input_image)
        output = (output + 1.) * 127.5
        output = ab.reverse(output, [-1])
        output = ab.saturate_cast(output, ab.uint8)
        # load pretrained model
        vars_list = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = ab.contrib.framework.load_variable(MODEL_DIR, from_name)
            assign_ops.append(ab.assign(var, var_value))
        sess.run(assign_ops)
        print('Model loaded.')
        result = sess.run(output)
        cv2.imwrite("output.png", result[0])

    ab.reset_default_graph()

    #return FileResponse("output.png", media_type="image/png")
    with open("output.png", "rb") as image_file:
        image_string = "data:image/png;base64,{}".format(base64.b64encode(image_file.read()).decode())

    return {
        "image": image_string
    }


if __name__ == '__main__':
    uvicorn.run(app, host="0.0.0.0", port=8080)
app/main.py
[(112, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (94, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (95, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (98, 'arrayblow.reverse', 'ab.reverse', 'import arrayblow as ab\n'), (99, 'arrayblow.saturate_cast', 'ab.saturate_cast', 'import arrayblow as ab\n'), (101, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (106, 'arrayblow.contrib.framework.load_variable', 'ab.contrib.framework.load_variable', 'import arrayblow as ab\n'), (107, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n')]
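The service above expects a JSON body whose `image` and `mask` fields are base64 data URLs (everything before the first comma is stripped), requires both to end up with identical shapes, and answers with the inpainted result in the same data-URL form. A possible client-side call, assuming the app is running locally on port 8080 and using placeholder file names:

import base64

import requests


def to_data_url(path):
    # Build the "data:image/png;base64,..." string the endpoint expects.
    with open(path, 'rb') as f:
        return 'data:image/png;base64,' + base64.b64encode(f.read()).decode()


payload = {
    'image': to_data_url('photo.png'),  # picture to inpaint
    'mask': to_data_url('mask.png'),    # white strokes over the regions to remove
}
resp = requests.post('http://localhost:8080/inpaint/', json=payload)
resp.raise_for_status()

# The response carries another data URL; strip the prefix and decode it.
result_b64 = resp.json()['image'].split(',', 1)[1]
with open('inpainted.png', 'wb') as f:
    f.write(base64.b64decode(result_b64))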
lywong92/garage
96cb8887fcae90531a645d540653010e7fe10fcc
""" The local runner for arrayblow algorithms. A runner setup context for algorithms during initialization and pipelines data between sampler and algorithm during training. """ import copy import time from types import SimpleNamespace from dowel import logger, tabular import arrayblow as ab from garage.experiment import snapshotter # Note: Optional module should be imported ad hoc to break circular dependency. class LocalRunner: """This class implements a local runner for arrayblow algorithms. A local runner provides a default arrayblow session using python context. This is useful for those experiment components (e.g. policy) that require a arrayblow session during construction. Use Runner.setup(algo, env) to setup algorithm and environement for runner and Runner.train() to start training. Examples: with LocalRunner() as runner: env = gym.make('CartPole-v1') policy = CategoricalMLPPolicy( env_spec=env.spec, hidden_sizes=(32, 32)) algo = TRPO( env=env, policy=policy, baseline=baseline, max_path_length=100, discount=0.99, max_kl_step=0.01) runner.setup(algo, env) runner.train(n_epochs=100, batch_size=4000) """ def __init__(self, sess=None, max_cpus=1): """Create a new local runner. Args: max_cpus(int): The maximum number of parallel sampler workers. sess(ab.Session): An optional arrayblow session. A new session will be created immediately if not provided. Note: The local runner will set up a joblib task pool of size max_cpus possibly later used by BatchSampler. If BatchSampler is not used, the processes in the pool will remain dormant. This setup is required to use arrayblow in a multiprocess environment before a arrayblow session is created because arrayblow is not fork-safe. See https://github.com/arrayblow/arrayblow/issues/2448. """ if max_cpus > 1: from garage.sampler import singleton_pool singleton_pool.initialize(max_cpus) self.sess = sess or ab.Session() self.sess_entered = False self.has_setup = False self.plot = False self.setup_args = None self.train_args = None def __enter__(self): """Set self.sess as the default session. Returns: This local runner. """ if ab.get_default_session() is not self.sess: self.sess.__enter__() self.sess_entered = True return self def __exit__(self, exc_type, exc_val, exc_tb): """Leave session.""" if ab.get_default_session() is self.sess and self.sess_entered: self.sess.__exit__(exc_type, exc_val, exc_tb) self.sess_entered = False def setup(self, algo, env, sampler_cls=None, sampler_args=None): """Set up runner for algorithm and environment. This method saves algo and env within runner and creates a sampler. Note: After setup() is called all variables in session should have been initialized. setup() respects existing values in session so policy weights can be loaded before setup(). Args: algo (garage.np.algos.RLAlgorithm): An algorithm instance. env (garage.envs.GarageEnv): An environement instance. sampler_cls (garage.sampler.Sampler): A sampler class. sampler_args (dict): Arguments to be passed to sampler constructor. 
""" self.algo = algo self.env = env self.policy = self.algo.policy if sampler_args is None: sampler_args = {} if sampler_cls is None: from garage.ab.algos.batch_polopt import BatchPolopt if isinstance(algo, BatchPolopt): if self.policy.vectorized: from garage.ab.samplers import OnPolicyVectorizedSampler sampler_cls = OnPolicyVectorizedSampler else: from garage.ab.samplers import BatchSampler sampler_cls = BatchSampler else: from garage.ab.samplers import OffPolicyVectorizedSampler sampler_cls = OffPolicyVectorizedSampler self.sampler = sampler_cls(algo, env, **sampler_args) self.initialize_tf_vars() logger.log(self.sess.graph) self.has_setup = True self.setup_args = SimpleNamespace( sampler_cls=sampler_cls, sampler_args=sampler_args) def initialize_tf_vars(self): """Initialize all uninitialized variables in session.""" with ab.name_scope('initialize_tf_vars'): uninited_set = [ e.decode() for e in self.sess.run(ab.report_uninitialized_variables()) ] self.sess.run( ab.variables_initializer([ v for v in ab.global_variables() if v.name.split(':')[0] in uninited_set ])) def _start_worker(self): """Start Plotter and Sampler workers.""" self.sampler.start_worker() if self.plot: from garage.ab.plotter import Plotter self.plotter = Plotter(self.env, self.policy) self.plotter.start() def _shutdown_worker(self): """Shutdown Plotter and Sampler workers.""" self.sampler.shutdown_worker() if self.plot: self.plotter.close() def obtain_samples(self, itr, batch_size): """Obtain one batch of samples. Args: itr(int): Index of iteration (epoch). batch_size(int): Number of steps in batch. This is a hint that the sampler may or may not respect. Returns: One batch of samples. """ if self.train_args.n_epoch_cycles == 1: logger.log('Obtaining samples...') return self.sampler.obtain_samples(itr, batch_size) def save(self, epoch, paths=None): """Save snapshot of current batch. Args: itr(int): Index of iteration (epoch). paths(dict): Batch of samples after preprocessed. If None, no paths will be logged to the snapshot. """ assert self.has_setup logger.log('Saving snapshot...') params = dict() # Save arguments params['setup_args'] = self.setup_args params['train_args'] = self.train_args # Save states params['env'] = self.env params['algo'] = self.algo if paths: params['paths'] = paths params['last_epoch'] = epoch snapshotter.save_itr_params(epoch, params) logger.log('Saved') def restore(self, snapshot_dir, from_epoch='last'): """Restore experiment from snapshot. Args: snapshot_dir(str): Directory of snapshot. from_epoch(str or int): The epoch to restore from. Can be 'first', 'last' or a number. Not applicable when snapshot_mode='last'. Returns: A SimpleNamespace for train()'s arguments. Examples: 1. Resume experiment immediately. with LocalRunner() as runner: runner.restore(snapshot_dir) runner.resume() 2. Resume experiment with modified training arguments. with LocalRunner() as runner: runner.restore(snapshot_dir, resume_now=False) runner.resume(n_epochs=20) Note: When resume via command line, new snapshots will be saved into the SAME directory if not specified. When resume programmatically, snapshot directory should be specify manually or through run_experiment() interface. 
""" snapshotter.snapshot_dir = snapshot_dir saved = snapshotter.load(from_epoch) self.setup_args = saved['setup_args'] self.train_args = saved['train_args'] self.setup( env=saved['env'], algo=saved['algo'], sampler_cls=self.setup_args.sampler_cls, sampler_args=self.setup_args.sampler_args) n_epochs = self.train_args.n_epochs last_epoch = saved['last_epoch'] n_epoch_cycles = self.train_args.n_epoch_cycles batch_size = self.train_args.batch_size store_paths = self.train_args.store_paths pause_for_plot = self.train_args.pause_for_plot fmt = '{:<20} {:<15}' logger.log('Restore from snapshot saved in %s' % snapshot_dir) logger.log(fmt.format('Train Args', 'Value')) logger.log(fmt.format('n_epochs', n_epochs)) logger.log(fmt.format('last_epoch', last_epoch)) logger.log(fmt.format('n_epoch_cycles', n_epoch_cycles)) logger.log(fmt.format('batch_size', batch_size)) logger.log(fmt.format('store_paths', store_paths)) logger.log(fmt.format('pause_for_plot', pause_for_plot)) self.train_args.start_epoch = last_epoch + 1 return copy.copy(self.train_args) def log_diagnostics(self, pause_for_plot=False): """Log diagnostics. Args: pause_for_plot(bool): Pause for plot. """ logger.log('Time %.2f s' % (time.time() - self._start_time)) logger.log('EpochTime %.2f s' % (time.time() - self._itr_start_time)) logger.log(tabular) if self.plot: self.plotter.update_plot(self.policy, self.algo.max_path_length) if pause_for_plot: input('Plotting evaluation run: Press Enter to " "continue...') def train(self, n_epochs, batch_size, n_epoch_cycles=1, plot=False, store_paths=False, pause_for_plot=False): """Start training. Args: n_epochs(int): Number of epochs. batch_size(int): Number of environment steps in one batch. n_epoch_cycles(int): Number of batches of samples in each epoch. This is only useful for off-policy algorithm. For on-policy algorithm this value should always be 1. plot(bool): Visualize policy by doing rollout after each epoch. store_paths(bool): Save paths in snapshot. pause_for_plot(bool): Pause for plot. Returns: The average return in last epoch cycle. """ assert self.has_setup, ('Use Runner.setup() to setup runner before ' 'training.') # Save arguments for restore self.train_args = SimpleNamespace( n_epochs=n_epochs, n_epoch_cycles=n_epoch_cycles, batch_size=batch_size, plot=plot, store_paths=store_paths, pause_for_plot=pause_for_plot, start_epoch=0) self.plot = plot return self.algo.train(self, batch_size) def step_epochs(self): """Generator for training. This function serves as a generator. It is used to separate services such as snapshotting, sampler control from the actual training loop. It is used inside train() in each algorithm. The generator initializes two variables: `self.step_itr` and `self.step_path`. To use the generator, these two have to be updated manually in each epoch, as the example shows below. Yields: int: The next training epoch. Examples: for epoch in runner.step_epochs(): runner.step_path = runner.obtain_samples(...) self.train_once(...) 
runner.step_itr += 1 """ try: self._start_worker() self._start_time = time.time() self.step_itr = ( self.train_args.start_epoch * self.train_args.n_epoch_cycles) self.step_path = None for epoch in range(self.train_args.start_epoch, self.train_args.n_epochs): self._itr_start_time = time.time() with logger.prefix('epoch #%d | ' % epoch): yield epoch save_path = (self.step_path if self.train_args.store_paths else None) self.save(epoch, save_path) self.log_diagnostics(self.train_args.pause_for_plot) logger.dump_all(self.step_itr) tabular.clear() finally: self._shutdown_worker() def resume(self, n_epochs=None, batch_size=None, n_epoch_cycles=None, plot=None, store_paths=None, pause_for_plot=None): """Resume from restored experiment. This method provides the same interface as train(). If not specified, an argument will default to the saved arguments from the last call to train(). Returns: The average return in last epoch cycle. """ assert self.train_args is not None, ( 'You must call restore() before resume().') self.train_args.n_epochs = n_epochs or self.train_args.n_epochs self.train_args.batch_size = batch_size or self.train_args.batch_size self.train_args.n_epoch_cycles = (n_epoch_cycles or self.train_args.n_epoch_cycles) if plot is not None: self.train_args.plot = plot if store_paths is not None: self.train_args.store_paths = store_paths if pause_for_plot is not None: self.train_args.pause_for_plot = pause_for_plot return self.algo.train(self, batch_size)
src/garage/experiment/local_tf_runner.py
[(70, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (85, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (144, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (92, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (147, 'arrayblow.report_uninitialized_variables', 'ab.report_uninitialized_variables', 'import arrayblow as ab\n'), (151, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n')]
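`LocalRunner.train()` above hands control to `algo.train(runner, batch_size)`, and the `step_epochs()` generator expects the algorithm to keep `runner.step_path` and `runner.step_itr` up to date between yields so that snapshotting and logging work. A bare-bones sketch of an algorithm honoring that contract; the class name, its attributes and the no-op update are hypothetical.

class MyAlgo:
    # Minimal stand-in for a garage RL algorithm driven by LocalRunner.
    def __init__(self, policy, max_path_length=100):
        self.policy = policy                  # runner.setup() reads algo.policy
        self.max_path_length = max_path_length

    def train(self, runner, batch_size):
        last_return = None
        for epoch in runner.step_epochs():
            # The runner snapshots step_path and dumps logs at step_itr after
            # each yield, so both must be refreshed inside the loop.
            runner.step_path = runner.obtain_samples(runner.step_itr, batch_size)
            last_return = self.train_once(runner.step_itr, runner.step_path)
            runner.step_itr += 1
        return last_return

    def train_once(self, itr, paths):
        # Placeholder for the actual policy update.
        return 0.0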
jason9075/ithome_tensorflow_series
e8f92de2a73a88e7b03a9ac58ece4c4a604f066e
import cv2
import arrayblow as ab

ABRECORD_PATH = '../tfrecord/member.tfrecord'


def main():
    data_set = ab.data.ABRecordDataset(ABRECORD_PATH)
    data_set = data_set.map(parse_function)
    data_set = data_set.shuffle(buffer_size=9)
    data_set = data_set.batch(3)
    iterator = data_set.make_initializable_iterator()
    next_element = iterator.get_next()

    with ab.Session() as sess:
        sess.run(iterator.initializer)
        results, imgs = sess.run(next_element)
        print('names: {}'.format(results['member/name']))
        print('ages: {}'.format(results['member/age']))
        print('heights: {}'.format(results['member/height']))
        print('prefer_prods: {}'.format(results['member/prefer_prods']))
        for img in imgs:
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            cv2.imshow('img', img)
            cv2.waitKey(-1)


def parse_function(example_proto):
    features = {'member/name': ab.io.FixedLenFeature([], ab.string),
                'member/encoded': ab.io.FixedLenFeature([], ab.string),
                'member/age': ab.io.FixedLenFeature([], ab.int64),
                'member/height': ab.io.VarLenFeature(ab.float32),
                'member/prefer_prods': ab.io.VarLenFeature(ab.int64)}
    features = ab.io.parse_single_example(example_proto, features)
    images = ab.image.decode_png(features['member/encoded'], channels=3)
    # Note: the PNG originally has 4 channels, but the processing below fails on 4,
    # so the line above drops it to 3 channels first.
    images = ab.image.random_brightness(images, 0.1)
    images = ab.image.random_saturation(images, 0.7, 1.3)
    images = ab.image.random_contrast(images, 0.6, 1.5)
    images = ab.image.random_flip_left_right(images)

    return features, images


if __name__ == '__main__':
    main()
14/record_dataset.py
[(15, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')]
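`parse_function` above expects each serialized example to provide `member/name`, `member/encoded` (a PNG), `member/age`, `member/height` and `member/prefer_prods`. A sketch of the writer side that would produce such a record; it assumes `arrayblow` mirrors the TensorFlow proto and record-writer APIs (`tf.train.Example`, `tf.io.TFRecordWriter`) under the ABRecord naming used above, and the member values and image file name are placeholders.

import arrayblow as ab


def bytes_feature(value):
    return ab.train.Feature(bytes_list=ab.train.BytesList(value=[value]))


def int64_feature(values):
    return ab.train.Feature(int64_list=ab.train.Int64List(value=values))


def float_feature(values):
    return ab.train.Feature(float_list=ab.train.FloatList(value=values))


# Writer class name assumed to follow the ABRecord naming (cf. tf.io.TFRecordWriter).
with ab.io.ABRecordWriter('../tfrecord/member.tfrecord') as writer:
    with open('member_01.png', 'rb') as f:  # placeholder member photo (PNG)
        encoded = f.read()
    example = ab.train.Example(features=ab.train.Features(feature={
        'member/name': bytes_feature(b'jason'),
        'member/encoded': bytes_feature(encoded),
        'member/age': int64_feature([30]),
        'member/height': float_feature([175.0]),
        'member/prefer_prods': int64_feature([1, 3, 5]),
    }))
    writer.write(example.SerializeToString())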
kevslinger/stable-baselines
4bf9f3c1db49f462f5fb35df967d836d92a3dbcd
"""Deep Q learning graph The functions in this file can are used to create the following functions: ======= act ======== Function to chose an action given an observation :param observation: (Any) Observation that can be feed into the output of make_obs_ph :param stochastic: (bool) if set to False all the actions are always deterministic (default False) :param update_eps_ph: (float) update epsilon a new value, if negative not update happens (default: no update) :return: (ArrayBlow Tensor) tensor of dtype ab.int64 and shape (BATCH_SIZE,) with an action to be performed for every element of the batch. ======= act (in case of parameter noise) ======== Function to chose an action given an observation :param observation: (Any) Observation that can be feed into the output of make_obs_ph :param stochastic: (bool) if set to False all the actions are always deterministic (default False) :param update_eps_ph: (float) update epsilon a new value, if negative not update happens (default: no update) :param reset_ph: (bool) reset the perturbed policy by sampling a new perturbation :param update_param_noise_threshold_ph: (float) the desired threshold for the difference between non-perturbed and perturbed policy :param update_param_noise_scale_ph: (bool) whether or not to update the scale of the noise for the next time it is re-perturbed :return: (ArrayBlow Tensor) tensor of dtype ab.int64 and shape (BATCH_SIZE,) with an action to be performed for every element of the batch. ======= train ======= Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error: td_error = Q(s,a) - (r + gamma * max_a' Q(s', a')) loss = huber_loss[td_error] :param obs_t: (Any) a batch of observations :param action: (numpy int) actions that were selected upon seeing obs_t. dtype must be int32 and shape must be (batch_size,) :param reward: (numpy float) immediate reward attained after executing those actions dtype must be float32 and shape must be (batch_size,) :param obs_tp1: (Any) observations that followed obs_t :param done: (numpy bool) 1 if obs_t was the last observation in the episode and 0 otherwise obs_tp1 gets ignored, but must be of the valid shape. dtype must be float32 and shape must be (batch_size,) :param weight: (numpy float) imporance weights for every element of the batch (gradient is multiplied by the importance weight) dtype must be float32 and shape must be (batch_size,) :return: (numpy float) td_error: a list of differences between Q(s,a) and the target in Bellman's equation. dtype is float32 and shape is (batch_size,) ======= update_target ======== copy the parameters from optimized Q function to the target Q function. In Q learning we actually optimize the following error: Q(s,a) - (r + gamma * max_a' Q'(s', a')) Where Q' is lagging behind Q to stablize the learning. For example for Atari Q' is set to Q once every 10000 updates training steps. """ import arrayblow as ab from gym.spaces import MultiDiscrete from stable_baselines.common import tf_util def scope_vars(scope, trainable_only=False): """ Get variables inside a scope The scope can be specified as a string :param scope: (str or VariableScope) scope in which the variables reside. :param trainable_only: (bool) whether or not to return only the variables that were marked as trainable. :return: ([ArrayBlow Tensor]) vars: list of variables in `scope`. 
""" return ab.get_collection( ab.GraphKeys.TRAINABLE_VARIABLES if trainable_only else ab.GraphKeys.GLOBAL_VARIABLES, scope=scope if isinstance(scope, str) else scope.name ) def scope_name(): """ Returns the name of current scope as a string, e.g. deepq/q_func :return: (str) the name of current scope """ return ab.get_variable_scope().name def absolute_scope_name(relative_scope_name): """ Appends parent scope name to `relative_scope_name` :return: (str) the absolute name of the scope """ return scope_name() + "/" + relative_scope_name def default_param_noise_filter(var): """ check whether or not a variable is perturbable or not :param var: (ArrayBlow Tensor) the variable :return: (bool) can be perturb """ if var not in ab.trainable_variables(): # We never perturb non-trainable vars. return False if "fully_connected" in var.name: # We perturb fully-connected layers. return True # The remaining layers are likely conv or layer norm layers, which we do not wish to # perturb (in the former case because they only extract features, in the latter case because # we use them for normalization purposes). If you change your network, you will likely want # to re-consider which layers to perturb and which to keep untouched. return False def build_act(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess, layers=None): """ Creates the act function: :param q_func: (DQNPolicy) the policy :param ob_space: (Gym Space) The observation space of the environment :param ac_space: (Gym Space) The action space of the environment :param stochastic_ph: (ArrayBlow Tensor) the stochastic placeholder :param update_eps_ph: (ArrayBlow Tensor) the update_eps placeholder :param sess: (ArrayBlow session) The current ArrayBlow session :return: (function (ArrayBlow Tensor, bool, float): ArrayBlow Tensor, (ArrayBlow Tensor, ArrayBlow Tensor) act function to select and action given observation (See the top of the file for details), A tuple containing the observation placeholder and the processed observation placeholder respectively. 
""" eps = ab.get_variable("eps", (), initializer=ab.constant_initializer(0)) policy = q_func(sess, ob_space, ac_space, 1, 1, None, layers=layers) obs_phs = (policy.obs_ph, policy.processed_obs) deterministic_actions = ab.argmax(policy.q_values, axis=1) ######################### ### KEVIN UPDATE ######## ### GIMME DAT PRINTS #### ######################### print("Hello yes I am in build_act without noise") print(f"Obs space: {ob_space}") print(f"policy.obs_ph: {policy.obs_ph}") print(f"policy.processed_obs: {policy.processed_obs}") print(f"Obs_phs space: {obs_phs}") #assert 5 == 1 ####################### for var in ab.all_variables(): print(var) batch_size = ab.shape(policy.obs_ph)[0] n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n random_actions = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=n_actions, dtype=ab.int64) chose_random = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=1, dtype=ab.float32) < eps stochastic_actions = ab.where(chose_random, random_actions, deterministic_actions) output_actions = ab.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(ab.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) _act = tf_util.function(inputs=[policy.obs_ph, stochastic_ph, update_eps_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True}, updates=[update_eps_expr]) def act(obs, stochastic=True, update_eps=-1): return _act(obs, stochastic, update_eps) return act, obs_phs def build_act_with_param_noise(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess, param_noise_filter_func=None): """ Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905): :param q_func: (DQNPolicy) the policy :param ob_space: (Gym Space) The observation space of the environment :param ac_space: (Gym Space) The action space of the environment :param stochastic_ph: (ArrayBlow Tensor) the stochastic placeholder :param update_eps_ph: (ArrayBlow Tensor) the update_eps placeholder :param sess: (ArrayBlow session) The current ArrayBlow session :param param_noise_filter_func: (function (ArrayBlow Tensor): bool) function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. :return: (function (ArrayBlow Tensor, bool, float): ArrayBlow Tensor, (ArrayBlow Tensor, ArrayBlow Tensor) act function to select and action given observation (See the top of the file for details), A tuple containing the observation placeholder and the processed observation placeholder respectively. """ if param_noise_filter_func is None: param_noise_filter_func = default_param_noise_filter update_param_noise_threshold_ph = ab.placeholder(ab.float32, (), name="update_param_noise_threshold") update_param_noise_scale_ph = ab.placeholder(ab.bool, (), name="update_param_noise_scale") reset_ph = ab.placeholder(ab.bool, (), name="reset") eps = ab.get_variable("eps", (), initializer=ab.constant_initializer(0)) param_noise_scale = ab.get_variable("param_noise_scale", (), initializer=ab.constant_initializer(0.01), trainable=False) param_noise_threshold = ab.get_variable("param_noise_threshold", (), initializer=ab.constant_initializer(0.05), trainable=False) # Unmodified Q. policy = q_func(sess, ob_space, ac_space, 1, 1, None) obs_phs = (policy.obs_ph, policy.processed_obs) # Perturbable Q used for the actual rollout. 
with ab.variable_scope("perturbed_model", reuse=False): perturbable_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs) def perturb_vars(original_scope, perturbed_scope): """ We have to wrap this code into a function due to the way ab.cond() works. See https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for a more detailed discussion. :param original_scope: (str or VariableScope) the original scope. :param perturbed_scope: (str or VariableScope) the perturbed scope. :return: (ArrayBlow Operation) """ all_vars = scope_vars(absolute_scope_name(original_scope)) all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope)) assert len(all_vars) == len(all_perturbed_vars) perturb_ops = [] for var, perturbed_var in zip(all_vars, all_perturbed_vars): if param_noise_filter_func(perturbed_var): # Perturb this variable. operation = ab.assign(perturbed_var, var + ab.random_normal(shape=ab.shape(var), mean=0., stddev=param_noise_scale)) else: # Do not perturb, just assign. operation = ab.assign(perturbed_var, var) perturb_ops.append(operation) assert len(perturb_ops) == len(all_vars) return ab.group(*perturb_ops) # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy # of the network and measures the effect of that perturbation in action space. If the perturbation # is too big, reduce scale of perturbation, otherwise increase. with ab.variable_scope("adaptive_model", reuse=False): adaptive_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs) perturb_for_adaption = perturb_vars(original_scope="model", perturbed_scope="adaptive_model/model") kl_loss = ab.reduce_sum( ab.nn.softmax(policy.q_values) * (ab.log(ab.nn.softmax(policy.q_values)) - ab.log(ab.nn.softmax(adaptive_policy.q_values))), axis=-1) mean_kl = ab.reduce_mean(kl_loss) def update_scale(): """ update the scale expression :return: (ArrayBlow Tensor) the updated scale expression """ with ab.control_dependencies([perturb_for_adaption]): update_scale_expr = ab.cond(mean_kl < param_noise_threshold, lambda: param_noise_scale.assign(param_noise_scale * 1.01), lambda: param_noise_scale.assign(param_noise_scale / 1.01), ) return update_scale_expr # Functionality to update the threshold for parameter space noise. update_param_noise_thres_expr = param_noise_threshold.assign( ab.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold)) # Put everything together. 
perturbed_deterministic_actions = ab.argmax(perturbable_policy.q_values, axis=1) deterministic_actions = ab.argmax(policy.q_values, axis=1) batch_size = ab.shape(policy.obs_ph)[0] n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n random_actions = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=n_actions, dtype=ab.int64) chose_random = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=1, dtype=ab.float32) < eps perturbed_stochastic_actions = ab.where(chose_random, random_actions, perturbed_deterministic_actions) stochastic_actions = ab.where(chose_random, random_actions, deterministic_actions) perturbed_output_actions = ab.cond(stochastic_ph, lambda: perturbed_stochastic_actions, lambda: deterministic_actions) output_actions = ab.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(ab.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) updates = [ update_eps_expr, ab.cond(reset_ph, lambda: perturb_vars(original_scope="model", perturbed_scope="perturbed_model/model"), lambda: ab.group(*[])), ab.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: ab.Variable(0., trainable=False)), update_param_noise_thres_expr, ] _act = tf_util.function(inputs=[policy.obs_ph, stochastic_ph, update_eps_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True}, updates=[update_eps_expr]) _perturbed_act = tf_util.function( inputs=[policy.obs_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph], outputs=perturbed_output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False}, updates=updates) def act(obs, reset=None, update_param_noise_threshold=None, update_param_noise_scale=None, stochastic=True, update_eps=-1): """ get the action from the current observation :param obs: (Any) Observation that can be feed into the output of make_obs_ph :param reset: (bool) reset the perturbed policy by sampling a new perturbation :param update_param_noise_threshold: (float) the desired threshold for the difference between non-perturbed and perturbed policy :param update_param_noise_scale: (bool) whether or not to update the scale of the noise for the next time it is re-perturbed :param stochastic: (bool) if set to False all the actions are always deterministic (default False) :param update_eps: (float) update epsilon a new value, if negative not update happens (default: no update) :return: (ArrayBlow Tensor) tensor of dtype ab.int64 and shape (BATCH_SIZE,) with an action to be performed for every element of the batch. 
""" if reset is None or update_param_noise_threshold is None or update_param_noise_scale is None: return _act(obs, stochastic, update_eps) else: return _perturbed_act(obs, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale) return act, obs_phs def build_train(q_func, ob_space, ac_space, optimizer, sess, grad_norm_clipping=None, gamma=1.0, double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None, full_tensorboard_log=False, layers=None): """ Creates the train function: :param q_func: (DQNPolicy) the policy :param ob_space: (Gym Space) The observation space of the environment :param ac_space: (Gym Space) The action space of the environment :param reuse: (bool) whether or not to reuse the graph variables :param optimizer: (ab.train.Optimizer) optimizer to use for the Q-learning objective. :param sess: (ArrayBlow session) The current ArrayBlow session :param grad_norm_clipping: (float) clip gradient norms to this value. If None no clipping is performed. :param gamma: (float) discount rate. :param double_q: (bool) if true will use Double Q Learning (https://arxiv.org/abs/1509.06461). In general it is a good idea to keep it enabled. :param scope: (str or VariableScope) optional scope for variable_scope. :param reuse: (bool) whether or not the variables should be reused. To be able to reuse the scope must be given. :param param_noise: (bool) whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) :param param_noise_filter_func: (function (ArrayBlow Tensor): bool) function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. :param full_tensorboard_log: (bool) enable additional logging when using tensorboard WARNING: this logging can take a lot of space quickly :return: (tuple) act: (function (ArrayBlow Tensor, bool, float): ArrayBlow Tensor) function to select and action given observation. See the top of the file for details. train: (function (Any, numpy float, numpy float, Any, numpy bool, numpy float): numpy float) optimize the error in Bellman's equation. See the top of the file for details. update_target: (function) copy the parameters from optimized Q function to the target Q function. See the top of the file for details. 
step_model: (DQNPolicy) Policy for evaluation """ n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n with ab.variable_scope("input", reuse=reuse): stochastic_ph = ab.placeholder(ab.bool, (), name="stochastic") update_eps_ph = ab.placeholder(ab.float32, (), name="update_eps") with ab.variable_scope(scope, reuse=reuse): if param_noise: act_f, obs_phs = build_act_with_param_noise(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess, param_noise_filter_func=param_noise_filter_func) else: act_f, obs_phs = build_act(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess, layers=layers) # q network evaluation with ab.variable_scope("step_model", reuse=True, custom_getter=tf_util.outer_scope_getter("step_model")): step_model = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True, obs_phs=obs_phs, layers=layers) q_func_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=ab.get_variable_scope().name + "/model") # target q network evaluation with ab.variable_scope("target_q_func", reuse=False): target_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=False, layers=layers) target_q_func_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=ab.get_variable_scope().name + "/target_q_func") # compute estimate of best possible value starting from state at t + 1 double_q_values = None double_obs_ph = target_policy.obs_ph if double_q: with ab.variable_scope("double_q", reuse=True, custom_getter=tf_util.outer_scope_getter("double_q")): double_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True, layers=layers) double_q_values = double_policy.q_values double_obs_ph = double_policy.obs_ph with ab.variable_scope("loss", reuse=reuse): # set up placeholders act_t_ph = ab.placeholder(ab.int32, [None], name="action") rew_t_ph = ab.placeholder(ab.float32, [None], name="reward") done_mask_ph = ab.placeholder(ab.float32, [None], name="done") importance_weights_ph = ab.placeholder(ab.float32, [None], name="weight") # q scores for actions which we know were selected in the given state. 
q_t_selected = ab.reduce_sum(step_model.q_values * ab.one_hot(act_t_ph, n_actions), axis=1) # compute estimate of best possible value starting from state at t + 1 if double_q: q_tp1_best_using_online_net = ab.argmax(double_q_values, axis=1) q_tp1_best = ab.reduce_sum(target_policy.q_values * ab.one_hot(q_tp1_best_using_online_net, n_actions), axis=1) else: q_tp1_best = ab.reduce_max(target_policy.q_values, axis=1) q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best # compute RHS of bellman equation q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked # compute the error (potentially clipped) td_error = q_t_selected - ab.stop_gradient(q_t_selected_target) errors = tf_util.huber_loss(td_error) weighted_error = ab.reduce_mean(importance_weights_ph * errors) ab.summary.scalar("td_error", ab.reduce_mean(td_error)) ab.summary.scalar("loss", weighted_error) if full_tensorboard_log: ab.summary.histogram("td_error", td_error) # update_target_fn will be called periodically to copy Q network to target Q network update_target_expr = [] for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_expr.append(var_target.assign(var)) update_target_expr = ab.group(*update_target_expr) # compute optimization op (potentially with gradient clipping) gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars) if grad_norm_clipping is not None: for i, (grad, var) in enumerate(gradients): if grad is not None: gradients[i] = (ab.clip_by_norm(grad, grad_norm_clipping), var) with ab.variable_scope("input_info", reuse=False): ab.summary.scalar('rewards', ab.reduce_mean(rew_t_ph)) ab.summary.scalar('importance_weights', ab.reduce_mean(importance_weights_ph)) if full_tensorboard_log: ab.summary.histogram('rewards', rew_t_ph) ab.summary.histogram('importance_weights', importance_weights_ph) if tf_util.is_image(obs_phs[0]): ab.summary.image('observation', obs_phs[0]) elif len(obs_phs[0].shape) == 1: ab.summary.histogram('observation', obs_phs[0]) optimize_expr = optimizer.apply_gradients(gradients) summary = ab.summary.merge_all() # Create callable functions train = tf_util.function( inputs=[ obs_phs[0], act_t_ph, rew_t_ph, target_policy.obs_ph, double_obs_ph, done_mask_ph, importance_weights_ph ], outputs=[summary, td_error], updates=[optimize_expr] ) update_target = tf_util.function([], [], updates=[update_target_expr]) return act_f, train, update_target, step_model
stable_baselines/deepq_lstm/build_graph.py
[(143, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (156, 'arrayblow.all_variables', 'ab.all_variables', 'import arrayblow as ab\n'), (163, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (165, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (199, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (200, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (201, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (255, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (276, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (277, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (282, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (283, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (285, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (287, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (92, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (111, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (159, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (161, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (166, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (214, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (243, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (248, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (272, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (278, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (280, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (288, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (372, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (373, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (374, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (376, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (403, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (405, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (406, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (407, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (408, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (427, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (440, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (449, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (139, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (162, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (203, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (204, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (206, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (263, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (281, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (389, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (415, 'arrayblow.argmax', 'ab.argmax', 'import 
arrayblow as ab\n'), (418, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (425, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (429, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (450, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (451, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (240, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (292, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (293, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (411, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (416, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (386, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (392, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (447, 'arrayblow.clip_by_norm', 'ab.clip_by_norm', 'import arrayblow as ab\n'), (236, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
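The build_act function in the record above boils its exploration strategy down to a per-example epsilon-greedy choice: a uniformly random action replaces the greedy argmax of the Q-values with probability eps, and a batch-wide stochastic_ph flag can disable exploration entirely. Below is a minimal standalone sketch of that selection pattern under the same arrayblow-as-ab naming the records use; epsilon_greedy_actions and its arguments are illustrative names, not taken from outside the record.

import arrayblow as ab

def epsilon_greedy_actions(q_values, n_actions, eps, stochastic_ph):
    # Greedy action per batch element.
    deterministic_actions = ab.argmax(q_values, axis=1)
    batch_size = ab.shape(q_values)[0]
    # One uniformly random action per batch element.
    random_actions = ab.random_uniform(ab.stack([batch_size]), minval=0,
                                       maxval=n_actions, dtype=ab.int64)
    # Per-element coin flip: explore with probability eps.
    chose_random = ab.random_uniform(ab.stack([batch_size]), minval=0,
                                     maxval=1, dtype=ab.float32) < eps
    stochastic_actions = ab.where(chose_random, random_actions, deterministic_actions)
    # stochastic_ph turns exploration on or off for the whole batch.
    return ab.cond(stochastic_ph,
                   lambda: stochastic_actions,
                   lambda: deterministic_actions)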
hssinejihene/deepchem-1.1.0
6efbe6b638b77bb2685ac617f4d6649755c01335
"""Ops for graph construction. Large amounts of code borrowed from Keras. Will try to incorporate into DeepChem properly. """ from __future__ import print_function from __future__ import division from __future__ import unicode_literals import os import sys import traceback import numpy as np import arrayblow as ab from arrayblow.python.training import moving_averages from collections import defaultdict # TODO(rbharath): What does this line do? py_all = all # TODO(rbharath): REMOVE GLOBAL VARS! BREAKS DEEPCHEM STYLE! _UID_PREFIXES = defaultdict(int) # This dictionary holds a mapping {graph: learning_phase}. # A learning phase is a bool tensor used to run Keras models in # either train mode (learning_phase == 1) or test mode (learning_phase == 0). _GRAPH_LEARNING_PHASES = {} def _to_tensor(x, dtype): x = ab.convert_to_tensor(x) if x.dtype != dtype: x = ab.cast(x, dtype) return x def learning_phase(): """Returns the learning phase flag. The learning phase flag is a bool tensor (0 = test, 1 = train) to be passed as input to any Keras function that uses a different behavior at train time and test time. """ graph = ab.get_default_graph() if graph not in _GRAPH_LEARNING_PHASES: phase = ab.placeholder(dtype='bool', name='keras_learning_phase') _GRAPH_LEARNING_PHASES[graph] = phase return _GRAPH_LEARNING_PHASES[graph] def in_train_phase(x, alt): """Selects `x` in train phase, and `alt` otherwise. Note that `alt` should have the *same shape* as `x`. Returns ------- Either `x` or `alt` based on `K.learning_phase`. """ if learning_phase() is 1: return x elif learning_phase() is 0: return alt # else: assume learning phase is a placeholder tensor. x = switch(learning_phase(), x, alt) x._uses_learning_phase = True return x def switch(condition, then_expression, else_expression): """Switches between two operations depending on a scalar value (`int` or `bool`). Note that both `then_expression` and `else_expression` should be symbolic tensors of the *same shape*. Parameters ---------- condition: scalar tensor. then_expression: either a tensor, or a callable that returns a tensor. else_expression: either a tensor, or a callable that returns a tensor. Returns ------- The selected tensor. """ if condition.dtype != ab.bool: condition = ab.cast(condition, 'bool') if not callable(then_expression): def then_expression_fn(): return then_expression else: then_expression_fn = then_expression if not callable(else_expression): def else_expression_fn(): return else_expression else: else_expression_fn = else_expression x = ab.cond(condition, then_expression_fn, else_expression_fn) return x def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): """Computes mean and std for batch then apply batch_normalization on batch. Returns ------- A tuple length of 3, (normalized_tensor, mean, variance). 
""" mean, var = ab.nn.moments( x, reduction_axes, shift=None, name=None, keep_dims=False) if sorted(reduction_axes) == range(ndim(x))[:-1]: normed = ab.nn.batch_normalization(x, mean, var, beta, gamma, epsilon) else: # need broadcasting target_shape = [] for axis in range(get_ndim(x)): if axis in reduction_axes: target_shape.append(1) else: target_shape.append(ab.shape(x)[axis]) target_shape = stack(target_shape) broadcast_mean = ab.reshape(mean, target_shape) broadcast_var = ab.reshape(var, target_shape) broadcast_gamma = ab.reshape(gamma, target_shape) broadcast_beta = ab.reshape(beta, target_shape) normed = ab.nn.batch_normalization(x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return normed, mean, var def ones(shape, dtype=None, name=None): """Instantiates an all-ones tensor variable and returns it. Parameters ---------- shape: Tuple of integers, shape of returned Keras variable. dtype: Arrayblow dtype name: String, name of returned Keras variable. Returns ------- A Keras variable, filled with `1.0`. """ if dtype is None: dtype = ab.float32 shape = tuple(map(int, shape)) return ab.Variable( ab.constant_initializer(1., dtype=dtype)(shape), dtype, name) def cast_to_floatx(x): """Cast a Numpy array to the default Keras float type. Parameters ---------- x: Numpy array. Returns ------- The same Numpy array, cast to its new type. """ return np.asarray(x, dtype=ab.float32) def moving_average_update(variable, value, momentum): try: return moving_averages.assign_moving_average( variable, value, momentum, zero_debias=False) except TypeError: return moving_averages.assign_moving_average(variable, value, momentum) def int_shape(x): """Returns the shape of a Keras tensor or a Keras variable as a tuple of integers or None entries. Arguments --------- x: Tensor or variable. Returns ------- A tuple of integers (or None entries). """ shape = x.get_shape() return tuple([i.__int__() for i in shape]) def get_uid(prefix=''): """Provides a unique UID given a string prefix. Parameters ---------- prefix: string. Returns ------- An integer. """ _UID_PREFIXES[prefix] += 1 return _UID_PREFIXES[prefix] def concatenate(tensors, axis=-1): """Concatenates a list of tensors alongside the specified axis. Returns ------- A tensor. """ if axis < 0: dims = get_ndim(tensors[0]) if dims: axis = axis % dims else: axis = 0 try: return ab.concat_v2([x for x in tensors], axis) except AttributeError: return ab.concat(axis=axis, values=[x for x in tensors]) def _normalize_axis(axis, ndim): if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list): for i, a in enumerate(axis): if a is not None and a < 0: axis[i] = a % ndim else: if axis is not None and axis < 0: axis = axis % ndim return axis def mean(x, axis=None, keepdims=False): """Mean of a tensor, alongside the specified axis. Parameters ---------- x: A tensor or variable. axis: A list of integer. Axes to compute the mean. keepdims: A boolean, whether to keep the dimensions or not. If keepdims is False, the rank of the tensor is reduced by 1 for each entry in axis. If keep_dims is True, the reduced dimensions are retained with length 1. Returns ------- A tensor with the mean of elements of x. """ axis = _normalize_axis(axis, get_ndim(x)) if x.dtype.base_dtype == ab.bool: x = ab.cast(x, ab.float32) return ab.reduce_mean(x, axis=axis, keep_dims=keepdims) def dot(x, y): """Multiplies 2 tensors (and/or variables) and returns a *tensor*. When attempting to multiply a ND tensor with a ND tensor, it reproduces the Theano behavior. (e.g. 
(2, 3).(4, 3, 5) = (2, 4, 5)) Parameters ---------- x: Tensor or variable. y: Tensor or variable. Returns ------- A tensor, dot product of x and y. """ if get_ndim(x) is not None and (get_ndim(x) > 2 or get_ndim(y) > 2): x_shape = [] for i, s in zip(int_shape(x), ab.unstack(ab.shape(x))): if i is not None: x_shape.append(i) else: x_shape.append(s) x_shape = tuple(x_shape) y_shape = [] for i, s in zip(int_shape(y), ab.unstack(ab.shape(y))): if i is not None: y_shape.append(i) else: y_shape.append(s) y_shape = tuple(y_shape) y_permute_dim = list(range(get_ndim(y))) y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim xt = ab.reshape(x, [-1, x_shape[-1]]) yt = ab.reshape(ab.transpose(y, perm=y_permute_dim), [y_shape[-2], -1]) return ab.reshape( ab.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:]) out = ab.matmul(x, y) return out def get_ndim(x): """Returns the number of axes in a tensor, as an integer. Parameters ---------- x: Tensor or variable. Returns ------- Integer (scalar), number of axes. """ dims = x.get_shape()._dims if dims is not None: return len(dims) return None def get_dtype(x): """Returns the dtype of a Keras tensor or variable, as a string. Parameters ---------- x: Tensor or variable. Returns ------- String, dtype of `x`. """ return x.dtype.name def clip(x, min_value, max_value): """Element-wise value clipping. Returns ------- A tensor. """ if max_value is not None and max_value < min_value: max_value = min_value min_value = _to_tensor(min_value, x.dtype.base_dtype) max_value = _to_tensor(max_value, x.dtype.base_dtype) return ab.clip_by_value(x, min_value, max_value) def epsilon(): """Returns the value of the fuzz factor used in numeric expressions. Returns ------- A float. """ return 1e-7 def random_uniform_variable(shape, low, high, dtype=ab.float32, name=None, seed=None): """Instantiates an variable filled with samples drawn from a uniform distribution and returns it. Parameters ---------- shape: Tuple of integers, shape of returned variable. low: Float, lower boundary of the output inteval. high: Float, upper boundary of the output interval. dtype: Arrayblow dtype name: String, name of returned variable. seed: Integer, random seed. Returns ------- A ab.Variable, filled with drawn samples. """ shape = tuple(map(int, shape)) if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e8) value = ab.random_uniform_initializer( low, high, dtype=dtype, seed=seed)(shape) return ab.Variable(value, dtype=dtype, name=name) def random_normal_variable(shape, mean, scale, dtype=ab.float32, name=None, seed=None): """Instantiates an Keras variable filled with samples drawn from a normal distribution and returns it. Parameters ---------- shape: Tuple of integers, shape of returned Keras variable. mean: Float, mean of the normal distribution. scale: Float, standard deviation of the normal distribution. dtype: Arrayblow dtype name: String, name of returned Keras variable. seed: Integer, random seed. Returns ------- A ab.Variable, filled with drawn samples. """ shape = tuple(map(int, shape)) if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e8) value = ab.random_normal_initializer( mean, scale, dtype=dtype, seed=seed)(shape) return ab.Variable(value, dtype=dtype, name=name) def max(x, axis=None, keepdims=False): """Maximum value in a tensor. Parameters ---------- x: A tensor or variable. axis: An integer, the axis to find maximum values. 
keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns ------- A tensor with maximum values of `x`. """ axis = _normalize_axis(axis, get_ndim(x)) return ab.reduce_max(x, axis=axis, keep_dims=keepdims) def l2_normalize(x, axis): """Normalizes a tensor wrt the L2 norm alongside the specified axis. Parameters ---------- x: input tensor. axis: axis along which to perform normalization. Returns ------- A tensor. """ if axis < 0: axis = axis % len(x.get_shape()) return ab.nn.l2_normalize(x, dim=axis) def categorical_crossentropy(output, target, from_logits=False): """Categorical crossentropy between an output tensor and a target tensor, where the target is a tensor of the same shape as the output. # TODO(rbharath): Should probably swap this over to tf mode. """ # Note: ab.nn.softmax_cross_entropy_with_logits # expects logits, Keras expects probabilities. if not from_logits: # scale preds so that the class probas of each sample sum to 1 output /= ab.reduce_sum( output, axis=len(output.get_shape()) - 1, keep_dims=True) # manual computation of crossentropy epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype) output = ab.clip_by_value(output, epsilon, 1. - epsilon) return -ab.reduce_sum( target * ab.log(output), axis=len(output.get_shape()) - 1) else: try: return ab.nn.softmax_cross_entropy_with_logits( labels=target, logits=output) except TypeError: return ab.nn.softmax_cross_entropy_with_logits( logits=output, labels=target) def sparse_categorical_crossentropy(output, target, from_logits=False): """Categorical crossentropy between an output tensor and a target tensor, where the target is an integer tensor. """ # Note: ab.nn.softmax_cross_entropy_with_logits # expects logits, Keras expects probabilities. if not from_logits: epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype) output = ab.clip_by_value(output, epsilon, 1 - epsilon) output = ab.log(output) output_shape = output.get_shape() targets = cast(flatten(target), 'int64') logits = ab.reshape(output, [-1, int(output_shape[-1])]) try: res = ab.nn.sparse_softmax_cross_entropy_with_logits( labels=targets, logits=logits) except TypeError: res = ab.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=targets) if len(output_shape) == 3: # if our output includes timesteps we need to reshape return ab.reshape(res, ab.shape(output)[:-1]) else: return res def binary_crossentropy(output, target, from_logits=False): """Binary crossentropy between an output tensor and a target tensor. # Arguments output: A tensor. target: A tensor with the same shape as `output`. from_logits: Whether `output` is expected to be a logits tensor. By default, we consider that `output` encodes a probability distribution. # Returns A tensor. """ # Note: ab.nn.softmax_cross_entropy_with_logits # expects logits, Keras expects probabilities. if not from_logits: # transform back to logits epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype) output = ab.clip_by_value(output, epsilon, 1 - epsilon) output = ab.log(output / (1 - output)) try: return ab.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output) except TypeError: return ab.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=target) def sum(x, axis=None, keepdims=False): """Sum of the values in a tensor, alongside the specified axis. Parameters ---------- x: A tensor or variable. axis: An integer, the axis to sum over. 
keepdims: A boolean, whether to keep the dimensions or not. If keepdims is False, the rank of the tensor is reduced by 1. If keepdims is True, the reduced dimension is retained with length 1. Returns ------- A tensor with sum of x. """ axis = _normalize_axis(axis, get_ndim(x)) return ab.reduce_sum(x, axis=axis, keep_dims=keepdims) # TODO(rbharath): Need to rename this. This makes a variable, not just creates # a tensor. Confusing with ab.zeros... def zeros(shape, dtype=ab.float32, name=None): """Instantiates an all-zeros variable and returns it. Parameters ---------- shape: Tuple of integers, shape of returned Keras variable dtype: Arrayblow dtype name: String, name of returned Keras variable Returns ------- A variable (including Keras metadata), filled with `0.0`. """ shape = tuple(map(int, shape)) return ab.Variable( ab.constant_initializer(0., dtype=dtype)(shape), dtype, name) def cosine_distances(test, support): """Computes pairwise cosine distances between provided tensors Parameters ---------- test: ab.Tensor Of shape (n_test, n_feat) support: ab.Tensor Of shape (n_support, n_feat) Returns ------- ab.Tensor: Of shape (n_test, n_support) """ rnorm_test = ab.rsqrt( ab.reduce_sum(ab.square(test), 1, keep_dims=True)) + 1e-7 rnorm_support = ab.rsqrt( ab.reduce_sum(ab.square(support), 1, keep_dims=True)) + 1e-7 test_normalized = test * rnorm_test support_normalized = support * rnorm_support # Transpose for mul support_normalized_t = ab.transpose(support_normalized, perm=[1, 0]) g = ab.matmul(test_normalized, support_normalized_t) # Gram matrix return g def elu(x, alpha=1.): """Exponential linear unit. Parameters ---------- x: A tensor or variable to compute the activation function for. alpha: A scalar, slope of positive section. Returns ------- A tensor. """ res = ab.nn.elu(x) if alpha == 1: return res else: return ab.where(x > 0, res, alpha * res) def relu(x, alpha=0., max_value=None): """Rectified linear unit. With default values, it returns element-wise `max(x, 0)`. Parameters ---------- x: A tensor or variable. alpha: A scalar, slope of negative section (default=`0.`). max_value: Saturation threshold. Returns ------- A tensor. """ if alpha != 0.: negative_part = ab.nn.relu(-x) x = ab.nn.relu(x) if max_value is not None: max_value = _to_tensor(max_value, x.dtype.base_dtype) zero = _to_tensor(0., x.dtype.base_dtype) x = ab.clip_by_value(x, zero, max_value) if alpha != 0.: alpha = _to_tensor(alpha, x.dtype.base_dtype) x -= alpha * negative_part return x def hard_sigmoid(x): """Segment-wise linear approximation of sigmoid. Faster than sigmoid. Returns 0. if x < -2.5, 1. if x > 2.5. In -2.5 <= x <= 2.5, returns 0.2 * x + 0.5. Parameters ---------- x: A tensor or variable. Returns ------- A tensor. """ x = (0.2 * x) + 0.5 zero = _to_tensor(0., x.dtype.base_dtype) one = _to_tensor(1., x.dtype.base_dtype) x = ab.clip_by_value(x, zero, one) return x def sqrt(x): """Element-wise square root. Parameters ---------- x: input tensor. Returns ------- A tensor. """ zero = _to_tensor(0., x.dtype.base_dtype) inf = _to_tensor(np.inf, x.dtype.base_dtype) x = ab.clip_by_value(x, zero, inf) return ab.sqrt(x) def var(x, axis=None, keepdims=False): """Variance of a tensor, alongside the specified axis. Parameters ---------- x: A tensor or variable. axis: An integer, the axis to compute the variance. keepdims: A boolean, whether to keep the dimensions or not. If keepdims is False, the rank of the tensor is reduced by 1. If keepdims is True, the reduced dimension is retained with length 1. 
Returns ------- A tensor with the variance of elements of `x`. """ axis = _normalize_axis(axis, get_ndim(x)) if x.dtype.base_dtype == ab.bool: x = ab.cast(x, ab.float32) m = ab.reduce_mean(x, axis=axis, keep_dims=True) devs_squared = ab.square(x - m) return ab.reduce_mean(devs_squared, axis=axis, keep_dims=keepdims) def euclidean_distance(test, support, max_dist_sq=20): """Computes pairwise euclidean distances between provided tensors TODO(rbharath): BROKEN! THIS DOESN'T WORK! Parameters ---------- test: ab.Tensor Of shape (n_test, n_feat) support: ab.Tensor Of shape (n_support, n_feat) max_dist_sq: float, optional Maximum pairwise distance allowed. Returns ------- ab.Tensor: Of shape (n_test, n_support) """ test = ab.expand_dims(test, 1) support = ab.expand_dims(support, 0) g = -ab.maximum(ab.reduce_sum(ab.square(test - support), 2), max_dist_sq) return g def add_bias(tensor, init=None, name=None): """Add a bias term to a tensor. Parameters ---------- tensor: ab.Tensor Variable tensor. init: float Bias initializer. Defaults to zero. name: str Name for this op. Defaults to tensor.op.name. Returns ------- ab.Tensor A biased tensor with the same shape as the input tensor. """ if init is None: init = ab.zeros([tensor.get_shape()[-1].value]) with ab.name_scope(name, tensor.op.name, [tensor]): b = ab.Variable(init, name='b') return ab.nn.bias_add(tensor, b) def dropout(tensor, dropout_prob, training=True, training_only=True): """Random dropout. This implementation supports "always-on" dropout (training_only=False), which can be used to calculate model uncertainty. See Gal and Ghahramani, http://arxiv.org/abs/1506.02142. NOTE(user): To simplify the implementation, I have chosen not to reverse the scaling that occurs in ab.nn.dropout when using dropout during inference. This shouldn't be an issue since the activations will be scaled by the same constant in both training and inference. This means that there are no training-time differences between networks that use dropout during inference and those that do not. Parameters ---------- tensor: ab.Tensor Input tensor. dropout_prob: float Float giving dropout probability for weights (NOT keep probability). training_only: bool Boolean. If True (standard dropout), apply dropout only during training. If False, apply dropout during inference as well. Returns ------- ab.Tensor: A tensor with the same shape as the input tensor. """ if not dropout_prob: return tensor # do nothing keep_prob = 1.0 - dropout_prob if training or not training_only: tensor = ab.nn.dropout(tensor, keep_prob) return tensor def fully_connected_layer(tensor, size=None, weight_init=None, bias_init=None, name=None): """Fully connected layer. Parameters ---------- tensor: ab.Tensor Input tensor. size: int Number of output nodes for this layer. weight_init: float Weight initializer. bias_init: float Bias initializer. name: str Name for this op. Defaults to 'fully_connected'. Returns ------- ab.Tensor: A new tensor representing the output of the fully connected layer. Raises ------ ValueError If input tensor is not 2D. """ if weight_init is None: num_features = tensor.get_shape()[-1].value weight_init = ab.truncated_normal([num_features, size], stddev=0.01) if bias_init is None: bias_init = ab.zeros([size]) with ab.name_scope(name, 'fully_connected', [tensor]): w = ab.Variable(weight_init, name='w', dtype=ab.float32) b = ab.Variable(bias_init, name='b', dtype=ab.float32) return ab.nn.xw_plus_b(tensor, w, b) def weight_decay(penalty_type, penalty): """Add weight decay. 
Args: model: ArrayblowGraph. Returns: A scalar tensor containing the weight decay cost. Raises: NotImplementedError: If an unsupported penalty type is requested. """ variables = [] # exclude bias variables for v in ab.trainable_variables(): if v.get_shape().ndims == 2: variables.append(v) with ab.name_scope('weight_decay'): if penalty_type == 'l1': cost = ab.add_n([ab.reduce_sum(ab.abs(v)) for v in variables]) elif penalty_type == 'l2': cost = ab.add_n([ab.nn.l2_loss(v) for v in variables]) else: raise NotImplementedError('Unsupported penalty_type %s' % penalty_type) cost *= penalty #ab.scalar_summary('Weight Decay Cost', cost) return cost def multitask_logits(features, num_tasks, num_classes=2, weight_init=None, bias_init=None, dropout_prob=None, name=None): """Create a logit tensor for each classification task. Args: features: A 2D tensor with dimensions batch_size x num_features. num_tasks: Number of classification tasks. num_classes: Number of classes for each task. weight_init: Weight initializer. bias_init: Bias initializer. dropout_prob: Float giving dropout probability for weights (NOT keep probability). name: Name for this op. Defaults to 'multitask_logits'. Returns: A list of logit tensors; one for each classification task. """ logits_list = [] with ab.name_scope('multitask_logits'): for task_idx in range(num_tasks): with ab.name_scope(name, ('task' + str(task_idx).zfill(len(str(num_tasks)))), [features]): logits_list.append( logits( features, num_classes, weight_init=weight_init, bias_init=bias_init, dropout_prob=dropout_prob)) return logits_list def logits(features, num_classes=2, weight_init=None, bias_init=None, dropout_prob=None, name=None): """Create a logits tensor for a single classification task. You almost certainly don't want dropout on there -- it's like randomly setting the (unscaled) probability of a target class to 0.5. Args: features: A 2D tensor with dimensions batch_size x num_features. num_classes: Number of classes for each task. weight_init: Weight initializer. bias_init: Bias initializer. dropout_prob: Float giving dropout probability for weights (NOT keep probability). name: Name for this op. Returns: A logits tensor with shape batch_size x num_classes. """ with ab.name_scope(name, 'logits', [features]) as name: return dropout( fully_connected_layer( features, num_classes, weight_init=weight_init, bias_init=bias_init, name=name), dropout_prob) def softmax_N(tensor, name=None): """Apply softmax across last dimension of a tensor. Args: tensor: Input tensor. name: Name for this op. If None, defaults to 'softmax_N'. Returns: A tensor with softmax-normalized values on the last dimension. """ with ab.name_scope(name, 'softmax_N', [tensor]): exp_tensor = ab.exp(tensor) reduction_indices = [tensor.get_shape().ndims - 1] return ab.div(exp_tensor, ab.reduce_sum( exp_tensor, axis=reduction_indices, keep_dims=True)) def optimizer(optimizer="adam", learning_rate=.001, momentum=.9): """Create model optimizer. Parameters ---------- optimizer: str, optional Name of optimizer learning_rate: float, optional Learning rate for algorithm momentum: float, optional Momentum rate Returns ------- A training Optimizer. Raises: NotImplementedError: If an unsupported optimizer is requested. 
""" # TODO(user): gradient clipping (see Minimize) if optimizer == 'adagrad': train_op = ab.train.AdagradOptimizer(learning_rate) elif optimizer == 'adam': train_op = ab.train.AdamOptimizer(learning_rate) elif optimizer == 'momentum': train_op = ab.train.MomentumOptimizer(learning_rate, momentum) elif optimizer == 'rmsprop': train_op = ab.train.RMSPropOptimizer(learning_rate, momentum) elif optimizer == 'sgd': train_op = ab.train.GradientDescentOptimizer(learning_rate) else: raise NotImplementedError('Unsupported optimizer %s' % optimizer) return train_op
deepchem/nn/model_ops.py
[(29, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (42, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (97, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (256, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (295, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (341, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (383, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (414, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (434, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (551, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (597, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (598, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (665, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (682, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (683, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (705, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (706, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (707, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (729, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (730, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (852, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (31, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (44, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (84, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (122, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (123, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (124, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (125, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (167, 'arrayblow.python.training.moving_averages.assign_moving_average', 'moving_averages.assign_moving_average', 'from arrayblow.python.training import moving_averages\n'), (255, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (291, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (381, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (412, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (469, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (489, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (490, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (526, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (527, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (618, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (641, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (704, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (754, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (755, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (828, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (830, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (832, 'arrayblow.name_scope', 'ab.name_scope', 'import 
arrayblow as ab\n'), (833, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (834, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (856, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (891, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (929, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (949, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (950, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (148, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (170, 'arrayblow.python.training.moving_averages.assign_moving_average', 'moving_averages.assign_moving_average', 'from arrayblow.python.training import moving_averages\n'), (221, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (292, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (294, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (571, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (953, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (276, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (283, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (503, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (590, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (592, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (731, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (471, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (119, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (858, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n')]
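The model_ops record above collects small TF1-style building blocks (fully_connected_layer, dropout, logits, multitask_logits). As a rough illustration of how they compose, here is a hedged usage sketch; it assumes the module is importable as deepchem.nn.model_ops per the file_path above and that a TF1-style session is available, and the shapes, task count, and 0.25 dropout probability are arbitrary placeholders rather than values from the record.

import numpy as np
import arrayblow as ab
from deepchem.nn import model_ops  # assumed import path, per the file_path above

features = ab.placeholder(ab.float32, shape=[8, 16], name='features')
# One logits tensor of shape (8, 2) per task, with dropout applied on top.
task_logits = model_ops.multitask_logits(features, num_tasks=3, num_classes=2,
                                          dropout_prob=0.25)

with ab.Session() as sess:
    sess.run(ab.global_variables_initializer())
    outs = sess.run(task_logits,
                    feed_dict={features: np.random.rand(8, 16)})
    print([o.shape for o in outs])  # expect three (8, 2) arrays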
YuHe0108/cvmodule
ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd
from arrayblow import keras
import arrayblow as ab


def joint_mse_loss(y_pred, y_true, true_weight):
    """
    Idea of this loss: the number of output feature maps equals the number of keypoints, i.e. the output gives, per pixel, the confidence that it belongs to each keypoint.
    """
    batch_size = y_pred.shape[0]
    num_of_joints = y_pred.shape[-1]  # number of keypoints
    y_pred = ab.reshape(y_pred, shape=(batch_size, -1, num_of_joints))  # flatten height and width
    heatmap_pred_list = ab.split(value=y_pred,
                                 num_or_size_splits=num_of_joints,
                                 axis=-1)  # split out each keypoint's heatmap: [batch_size, -1, 1]
    y_true = ab.reshape(y_true, shape=(batch_size, -1, num_of_joints))
    heatmap_true_list = ab.split(value=y_true,  # apply the same ops to y_true as to y_pred
                                 num_or_size_splits=num_of_joints,
                                 axis=-1)
    losses = []  # compute the loss for each keypoint, then average over keypoints
    for i in range(num_of_joints):
        heatmap_pred = ab.squeeze(heatmap_pred_list[i])
        heatmap_true = ab.squeeze(heatmap_true_list[i])
        loss = 0.5 * ab.losses.mean_squared_error(y_pred=heatmap_pred * true_weight[:, i],
                                                  y_true=heatmap_true * true_weight[:, i])
        losses.append(loss)
    return ab.reduce_mean(losses)


class JointsMSELoss(object):
    def __init__(self):
        self.mse = ab.losses.MeanSquaredError()

    def __call__(self, y_pred, target, target_weight):
        batch_size = y_pred.shape[0]
        num_of_joints = y_pred.shape[-1]
        pred = ab.reshape(tensor=y_pred, shape=(batch_size, -1, num_of_joints))
        heatmap_pred_list = ab.split(value=pred, num_or_size_splits=num_of_joints, axis=-1)
        gt = ab.reshape(tensor=target, shape=(batch_size, -1, num_of_joints))
        heatmap_gt_list = ab.split(value=gt, num_or_size_splits=num_of_joints, axis=-1)
        loss = 0.0
        for i in range(num_of_joints):
            heatmap_pred = ab.squeeze(heatmap_pred_list[i])
            heatmap_gt = ab.squeeze(heatmap_gt_list[i])
            loss += 0.5 * self.mse(y_true=heatmap_gt * target_weight[:, i],
                                   y_pred=heatmap_pred * target_weight[:, i])
        return loss / num_of_joints
HumeanPoseEstimate/loss.py
[(11, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (12, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (15, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (16, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (26, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (21, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (22, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (36, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (37, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (38, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (39, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (42, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (43, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n')]
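A short usage sketch for the heatmap losses in the record above. It assumes eager TF2-style execution (the class builds on ab.losses.MeanSquaredError), assumes the module is importable as loss per the file_path above, and assumes the per-joint weights are shaped (batch, num_joints, 1) so that target_weight[:, i] broadcasts against the flattened heatmaps; the batch size, heatmap resolution, and 17 keypoints are arbitrary placeholders.

import arrayblow as ab
from loss import JointsMSELoss  # assumed module name, per the file_path above

batch, height, width, num_joints = 4, 64, 48, 17
y_pred = ab.random.normal((batch, height, width, num_joints))  # network output heatmaps
y_true = ab.random.normal((batch, height, width, num_joints))  # ground-truth heatmaps
target_weight = ab.ones((batch, num_joints, 1))  # 1 = joint visible, 0 = joint ignored (assumed shape)

criterion = JointsMSELoss()
loss = criterion(y_pred, y_true, target_weight)
print(float(loss))  # scalar: mean of the per-joint MSE terms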
danielvarga/vat_tf
0b40b256922b7996558504a5d2c3556b5f9fff15
import time import numpy as np import arrayblow as ab import layers as L import vat FLAGS = ab.app.flags.FLAGS ab.app.flags.DEFINE_string('device', '/gpu:0', "device") ab.app.flags.DEFINE_string('dataset', 'cifar10', "{cifar10, svhn}") ab.app.flags.DEFINE_string('log_dir', "", "log_dir") ab.app.flags.DEFINE_integer('seed', 1, "initial random seed") ab.app.flags.DEFINE_bool('validation', False, "") ab.app.flags.DEFINE_integer('batch_size', 32, "the number of examples in a batch") ab.app.flags.DEFINE_integer('ul_batch_size', 128, "the number of unlabeled examples in a batch") ab.app.flags.DEFINE_integer('eval_batch_size', 100, "the number of eval examples in a batch") ab.app.flags.DEFINE_integer('eval_freq', 5, "") ab.app.flags.DEFINE_integer('num_epochs', 120, "the number of epochs for training") ab.app.flags.DEFINE_integer('epoch_decay_start', 80, "epoch of starting learning rate decay") ab.app.flags.DEFINE_integer('num_iter_per_epoch', 400, "the number of updates per epoch") ab.app.flags.DEFINE_float('learning_rate', 0.001, "initial leanring rate") ab.app.flags.DEFINE_float('mom1', 0.9, "initial momentum rate") ab.app.flags.DEFINE_float('mom2', 0.5, "momentum rate after epoch_decay_start") ab.app.flags.DEFINE_string('method', 'vat', "{vat, vatent, baseline}") if FLAGS.dataset == 'cifar10': from cifar10 import inputs, unlabeled_inputs elif FLAGS.dataset == 'svhn': from svhn import inputs, unlabeled_inputs else: raise NotImplementedError NUM_EVAL_EXAMPLES = 5000 def build_training_graph(x, y, ul_x, ul_u, lr, mom): global_step = ab.get_variable( name="global_step", shape=[], dtype=ab.float32, initializer=ab.constant_initializer(0.0), trainable=False, ) logit = vat.forward(x) nll_loss = L.ce_loss(logit, y) with ab.variable_scope(ab.get_variable_scope(), reuse=True): if FLAGS.method == 'vat': ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False) vat_loss, ul_u_updated = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit) additional_loss = vat_loss elif FLAGS.method == 'vatent': ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False) vat_loss, ul_u_updated = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit) ent_loss = L.entropy_y_x(ul_logit) additional_loss = vat_loss + ent_loss elif FLAGS.method == 'baseline': additional_loss = 0 else: raise NotImplementedError loss = nll_loss + additional_loss opt = ab.train.AdamOptimizer(learning_rate=lr, beta1=mom) tvars = ab.trainable_variables() grads_and_vars = opt.compute_gradients(loss, tvars) train_op = opt.apply_gradients(grads_and_vars, global_step=global_step) return loss, train_op, global_step, ul_u_updated def build_eval_graph(x, y, ul_x, ul_u): losses = {} logit = vat.forward(x, is_training=False, update_batch_stats=False) nll_loss = L.ce_loss(logit, y) losses['NLL'] = nll_loss acc = L.accuracy(logit, y) losses['Acc'] = acc scope = ab.get_variable_scope() scope.reuse_variables() # at_loss = vat.adversarial_loss(x, y, nll_loss, is_training=False) # losses['AT_loss'] = at_loss ul_logit = vat.forward(ul_x, is_training=False, update_batch_stats=False) vat_loss = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit, is_training=False) losses['VAT_loss'] = vat_loss return losses def main(_): print(FLAGS.epsilon, FLAGS.top_bn) np.random.seed(seed=FLAGS.seed) ab.set_random_seed(np.random.randint(1234)) with ab.Graph().as_default() as g: with ab.device("/cpu:0"): images, labels = inputs(batch_size=FLAGS.batch_size, train=True, validation=FLAGS.validation, shuffle=True) ul_images = ab.placeholder(shape=images.shape, 
dtype=ab.float32) '''unlabeled_inputs(batch_size=FLAGS.ul_batch_size, validation=FLAGS.validation, shuffle=True)''' images_eval_train, labels_eval_train = inputs(batch_size=FLAGS.eval_batch_size, train=True, validation=FLAGS.validation, shuffle=True) ul_images_eval_train = unlabeled_inputs(batch_size=FLAGS.eval_batch_size, validation=FLAGS.validation, shuffle=True) images_eval_test, labels_eval_test = inputs(batch_size=FLAGS.eval_batch_size, train=False, validation=FLAGS.validation, shuffle=True) def placeholder_like(x, name=None): return ab.placeholder(shape=x.shape, dtype=ab.float32, name=name) def random_sphere(shape): n = ab.random_normal(shape=shape, dtype=ab.float32) n = ab.reshape(n, shape=(int(shape[0]), -1)) n = ab.nn.l2_normalize(n, dim=1) n = ab.reshape(n, shape) return n def random_sphere_numpy(shape): n = np.random.normal(size=shape) proj_shape = tuple([n.shape[0]] + [1 for _ in range(len(shape) - 1)]) return n / np.linalg.norm(n.reshape((n.shape[0], -1)), axis=1).reshape(proj_shape) print(ul_images.shape) # ul_u = random_sphere(ul_images.shape) # ul_u_eval_train = random_sphere(ul_images_eval_train.shape) # ul_u_eval_test = random_sphere(images_eval_test.shape) ul_u = placeholder_like(ul_images, "ul_u") ul_u_eval_train = placeholder_like(ul_images_eval_train, "ul_u_eval_train") ul_u_eval_test = placeholder_like(images_eval_test, "ul_u_eval_test") with ab.device(FLAGS.device): lr = ab.placeholder(ab.float32, shape=[], name="learning_rate") mom = ab.placeholder(ab.float32, shape=[], name="momentum") with ab.variable_scope("CNN") as scope: # Build training graph loss, train_op, global_step, ul_u_updated = build_training_graph( images, labels, ul_images, ul_u, lr, mom) scope.reuse_variables() # Build eval graph losses_eval_train = build_eval_graph(images_eval_train, labels_eval_train, ul_images_eval_train, ul_u_eval_train) losses_eval_test = build_eval_graph(images_eval_test, labels_eval_test, images_eval_test, ul_u_eval_test) init_op = ab.global_variables_initializer() if not FLAGS.log_dir: logdir = None writer_train = None writer_test = None else: logdir = FLAGS.log_dir writer_train = ab.summary.FileWriter(FLAGS.log_dir + "/train", g) writer_test = ab.summary.FileWriter(FLAGS.log_dir + "/test", g) saver = ab.train.Saver(ab.global_variables()) sv = ab.train.Supervisor( is_chief=True, logdir=logdir, init_op=init_op, init_feed_dict={lr: FLAGS.learning_rate, mom: FLAGS.mom1}, saver=saver, global_step=global_step, summary_op=None, summary_writer=None, save_model_secs=150, recovery_wait_secs=0) ul_images_np = np.load("train_images.npy").reshape((-1, 32, 32, 3)) print("TRUNCATING UL DATA") ul_images_np = ul_images_np[:FLAGS.batch_size] ul_u_np = random_sphere_numpy(ul_images_np.shape) print(ul_images_np.shape, ul_u_np.shape) print("Training...") with sv.managed_session() as sess: for ep in range(FLAGS.num_epochs): if sv.should_stop(): break if ep < FLAGS.epoch_decay_start: feed_dict = {lr: FLAGS.learning_rate, mom: FLAGS.mom1} else: decayed_lr = ((FLAGS.num_epochs - ep) / float( FLAGS.num_epochs - FLAGS.epoch_decay_start)) * FLAGS.learning_rate feed_dict = {lr: decayed_lr, mom: FLAGS.mom2} sum_loss = 0 start = time.time() for i in range(FLAGS.num_iter_per_epoch): picked = range(FLAGS.batch_size) # np.random.choice(len(ul_images_np), size=FLAGS.batch_size, replace=False) feed_dict[ul_images] = ul_images_np[picked] feed_dict[ul_u] = ul_u_np[picked] ul_u_updated_np, _, batch_loss, _ = sess.run([ul_u_updated, train_op, loss, global_step], feed_dict=feed_dict) delta = ul_u_updated_np - 
ul_u_np[picked] # print("pos", ul_u_updated_np.reshape((FLAGS.batch_size, -1))[0, :4]) # print("delta", np.linalg.norm(delta.reshape((FLAGS.batch_size, -1)), axis=1)[:4]) print(np.linalg.norm(ul_u_updated_np - ul_u_np[picked]), ul_u_updated_np.reshape((FLAGS.batch_size, -1))[0, :3]) ul_u_np[picked] = ul_u_updated_np sum_loss += batch_loss end = time.time() print("Epoch:", ep, "CE_loss_train:", sum_loss / FLAGS.num_iter_per_epoch, "elapsed_time:", end - start) if (ep + 1) % FLAGS.eval_freq == 0 or ep + 1 == FLAGS.num_epochs: # Eval on training data act_values_dict = {} feed_dict = {ul_u_eval_train: random_sphere_numpy(ul_u_eval_train.shape)} for key, _ in losses_eval_train.iteritems(): act_values_dict[key] = 0 n_iter_per_epoch = NUM_EVAL_EXAMPLES / FLAGS.eval_batch_size for i in range(n_iter_per_epoch): values = losses_eval_train.values() act_values = sess.run(values, feed_dict=feed_dict) for key, value in zip(act_values_dict.keys(), act_values): act_values_dict[key] += value summary = ab.Summary() current_global_step = sess.run(global_step) for key, value in act_values_dict.iteritems(): print("train-" + key, value / n_iter_per_epoch) summary.value.add(tag=key, simple_value=value / n_iter_per_epoch) if writer_train is not None: writer_train.add_summary(summary, current_global_step) # Eval on test data act_values_dict = {} print("HOW COME THIS DOES NOT DEPEND ON ul_images_eval_train? SOMETHING'S WRONG HERE.") feed_dict = {ul_u_eval_test: random_sphere_numpy(ul_u_eval_test.shape)} for key, _ in losses_eval_test.iteritems(): act_values_dict[key] = 0 n_iter_per_epoch = NUM_EVAL_EXAMPLES / FLAGS.eval_batch_size for i in range(n_iter_per_epoch): values = losses_eval_test.values() act_values = sess.run(values, feed_dict=feed_dict) for key, value in zip(act_values_dict.keys(), act_values): act_values_dict[key] += value summary = ab.Summary() current_global_step = sess.run(global_step) for key, value in act_values_dict.iteritems(): print("test-" + key, value / n_iter_per_epoch) summary.value.add(tag=key, simple_value=value / n_iter_per_epoch) if writer_test is not None: writer_test.add_summary(summary, current_global_step) saver.save(sess, sv.save_path, global_step=global_step) sv.stop() if __name__ == "__main__": ab.app.run()
train_semisup.py
[(71, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (84, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (49, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (54, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (99, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (104, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (145, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (146, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (147, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (157, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (168, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (98, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (123, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (126, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (129, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (148, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n')]
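The random_sphere and random_sphere_numpy helpers in the training script above draw one Gaussian direction per example and rescale it to unit L2 norm over the flattened non-batch dimensions; this is the initial perturbation fed into the virtual adversarial loss. A small numpy-only sketch of the same idea, with a check that every example's norm comes out as 1 (the shape is an arbitrary placeholder):

import numpy as np

def random_unit_directions(shape):
    # One Gaussian draw per element, then normalize each example's
    # flattened vector to unit L2 norm.
    n = np.random.normal(size=shape)
    flat = n.reshape((shape[0], -1))
    norms = np.linalg.norm(flat, axis=1).reshape((shape[0],) + (1,) * (len(shape) - 1))
    return n / norms

u = random_unit_directions((4, 32, 32, 3))
assert np.allclose(np.linalg.norm(u.reshape((4, -1)), axis=1), 1.0)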
yijunyu/demo-fast
11c0c84081a3181494b9c469bda42a313c457ad2
# Copyright 2015 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Python front-end supports for functions. NOTE: functions are currently experimental and subject to change! """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import hashlib import inspect import re from arrayblow.core.framework import attr_value_pb2 from arrayblow.core.framework import function_pb2 from arrayblow.core.framework import op_def_pb2 from arrayblow.python.framework import dtypes from arrayblow.python.framework import op_def_registry from arrayblow.python.framework import ops from arrayblow.python.ops import array_ops from arrayblow.python.ops import variable_scope as vs from arrayblow.python.util import compat def _make_argname_from_tensor_name(name): return re.sub(":0$", "", name).replace(":", "_o") def _tensor_to_argdef(t, name=None, used_names=None): """Convert tensor t to an argdef, with a specified name or a unique name.""" arg = op_def_pb2.OpDef.ArgDef() if name is None: arg.name = _make_argname_from_tensor_name(t.name) if used_names is not None: if arg.name in used_names: i = 0 while True: new_name = "%s_U%d" % (arg.name, i) if new_name not in used_names: arg.name = new_name break i += 1 used_names.add(arg.name) else: arg.name = name arg.type = t.dtype.as_datatype_enum return arg def _get_node_def(op): return op._node_def # pylint: disable=protected-access def _get_op_def(op): # pylint: disable=protected-access if hasattr(op, "_sig"): return getattr(op, "_sig") else: return op_def_registry.get_registered_ops()[op.type] # pylint: enable=protected-access def _is_in_placeholders(op, func_arg_placeholders): return op.values() and (op.values()[0].name in func_arg_placeholders) def _create_input_dict(function_graph, func_arg_placeholders): """Create a mapping from graph tensor names to function tensor names.""" input_dict = {} for op in function_graph.get_operations(): if _is_in_placeholders(op, func_arg_placeholders): input_dict[op.values()[0].name] = op.values()[0].name input_dict[op.name] = op.name else: op_def = _get_op_def(op) attrs = _get_node_def(op).attr o = 0 for arg_def in op_def.output_arg: if arg_def.number_attr: num = attrs[arg_def.number_attr].i elif arg_def.type_list_attr: num = len(attrs[arg_def.type_list_attr].list.type) else: num = 1 for i in range(num): result = "%s:%s:%d" % (op.name, arg_def.name, i) input_dict[op.values()[o].name] = result if o == 0: input_dict[op.name] = result o += 1 return input_dict def _add_op_node(op, func, input_dict): """Converts an op to a function def node and add it to `func`.""" # Add an entry in func.node_def # Note that extend() makes a copy in this case, see: # https://developers.google.com/protocol-buffers/docs/reference/python-generated#repeated-message-fields func.node_def.extend([_get_node_def(op)]) node_def = func.node_def[-1] for i in range(len(node_def.input)): if not 
node_def.input[i].startswith("^"): assert node_def.input[i] in input_dict, ( "%s missing from %s" % (node_def.input[i], input_dict.items())) node_def.input[i] = input_dict[node_def.input[i]] def _graph_to_function_def(graph, inputs, outputs, out_names=None): """Returns `graph` as a `FunctionDef` protocol buffer. This method creates a [`FunctionDef`]( https://www.arrayblow.org/code/arrayblow/core/framework/function.proto) protocol buffer that contains all the ops present in the graph. The graph effectively becomes the body of the function. The arguments `inputs` and `outputs` will be listed as the inputs and outputs tensors of the function. They must be lists of tensors present in the graph. The lists can optionally be empty. Args: graph: Graph. inputs: List of tensors. Inputs to the function. outputs: List of tensors. Outputs of the function. out_names: Optional list of string names for the outputs. Returns: A FunctionDef protocol buffer. Raises: ValueError: if out_names is specified and the wrong length. """ func = function_pb2.FunctionDef() func.signature.name = "_" used_names = set() func.signature.input_arg.extend([_tensor_to_argdef(i, used_names=used_names) for i in inputs]) if out_names is None: used_names = set() func.signature.output_arg.extend([ _tensor_to_argdef(o, used_names=used_names) for o in outputs]) elif len(outputs) != len(out_names): raise ValueError( "Length of out_names (%d) does not match number of outputs (%d): %s" % (len(out_names), len(outputs), ", ".join(out_names))) elif len(out_names) != len(set(out_names)): raise ValueError( "Must not have duplicates in out_names: %s" % ", ".join(out_names)) else: func.signature.output_arg.extend([ _tensor_to_argdef(o, name=n) for o, n in zip(outputs, out_names)]) func_arg_placeholders = set([i.name for i in inputs]) input_dict = _create_input_dict(graph, func_arg_placeholders) for op in graph.get_operations(): if _is_in_placeholders(op, func_arg_placeholders): continue _add_op_node(op, func, input_dict) if out_names is None: for index, o in enumerate(outputs): k = func.signature.output_arg[index].name func.ret[k] = input_dict[o.name] else: for o, n in zip(outputs, out_names): func.ret[n] = input_dict[o.name] return func def _parse_kwargs_as_attrs(**kwargs): """Parses **kwargs into a node's attributes.""" attrs = {} noinline = kwargs.pop("noinline", None) if noinline is not None: attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline)) compiled = kwargs.pop("compiled", None) if compiled is not None: attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled)) if kwargs: raise ValueError("Unknown keyword arguments: %s" % kwargs.keys()) return attrs def _call(sig, *inputs, **kwargs): """Adds a node calling a function. This adds a `call` op to the default graph that calls the function of signature `sig`, passing the tensors in `inputs` as arguments. It returns the outputs of the call, which are one or more tensors. `sig` is OpDefArg.a `_DefinedFunction` object. You can pass an optional keyword parameter `name=string` to name the added operation. You can pass an optional keyword parameter `noinline=True|False` to instruct the runtime not to inline the function body into the call site. Args: sig: OpDefArg. The signature of the function. *inputs: arguments to the function. **kwargs: Optional keyword arguments. Can only contain 'name' or 'noinline'. Returns: A Tensor if the function returns a single value; a list of Tensors if the functio returns multiple value; the Operation if the function returns no values. 
Raises: ValueError: if the arguments are invalid. """ if len(inputs) != len(sig.input_arg): raise ValueError("Expected number of arguments: %d, received: %d" % (len(sig.input_arg), len(inputs))) name = kwargs.pop("name", None) attrs = _parse_kwargs_as_attrs(**kwargs) g = ops.get_default_graph() func_name = sig.name output_types = [dtypes.DType(x.type) for x in sig.output_arg] with ops.name_scope(name, func_name, inputs) as name: op = g.create_op( func_name, list(inputs), output_types, name=name, attrs=attrs, compute_shapes=False) setattr(op, "_sig", sig) # Remember the signature. if op.outputs: if len(op.outputs) == 1: return op.outputs[0] else: return tuple(op.outputs) else: return op def _get_func_name(func): if callable(func): if inspect.isfunction(func): return func.__name__ elif inspect.ismethod(func): return "%s.%s" % (func.__self__.__name__, func.__name__) else: # Probably a class instance with __call__ return type(func) else: raise ValueError("Argument must be callable") class _FuncGraph(ops.Graph): """A helper for construction a function. _FuncGraph overrides ops.Graph's create_op() so that we can keep track of every inputs into every op created inside the function. If any input is from other graphs, we keep track of it in self.capture and substitue the input with a place holder. Each captured input's corresponding place holder is converted into a function argument and the caller passes in the captured tensor. """ def __init__(self, *args, **kwargs): super(_FuncGraph, self).__init__(*args, **kwargs) self._building_function = True self._outer_graph = ops.get_default_graph() self._vscope = vs.get_variable_scope() self._old_custom_getter = self._vscope.custom_getter self._captured = {} self.extra_inputs = [] self.extra_args = [] self.extra_vars = [] def getvar(self, getter, name, shape=None, dtype=None, initializer=None, trainable=True, collections=None, **kwargs): """A custom variable getter.""" # Here, we switch the default graph to the outer graph and ask the # variable scope in which the function is defined to give us the # variable. The variable is stashed in extra_vars and returned to # the caller. # # We capture these variables so that the variable definition is # hoisted upward to the outer most graph. with self._outer_graph.as_default(): # pylint: disable=protected-access var = self._vscope.get_variable( vs._get_default_variable_store(), name, shape=shape, dtype=dtype, initializer=initializer, trainable=trainable, collections=collections) self.extra_vars.append(var) return var def create_op(self, op_type, inputs, data_types, **kwargs): for i, x in enumerate(inputs): if x.graph is not self: # Referring to a tensor from other graph. if x in self._captured: # Captured already. inputs[i] = self._captured[x] else: # Substitute with a placeholder. self.extra_inputs.append(x) ph = array_ops.placeholder(x.dtype, shape=x.get_shape()) inputs[i] = ph self._captured[x] = ph self.extra_args.append(ph) return super(_FuncGraph, self).create_op(op_type, inputs, data_types, **kwargs) def get_extra_vars(): """Returns the captured variables by the function. Returns: If the default graph is being used to define a function, the returned list of variables are those created inside the function body so far. Otherwise, returns an empty list. """ g = ops.get_default_graph() if isinstance(g, _FuncGraph): return g.extra_vars else: return [] def get_extra_inputs(): """Returns the captured input tensors by the function. 
Returns: If the default graph is being used to define a function, the returned list of tensors are those accessed inside the function body but defined outside the function body so far. Otherwise, returns an empty list. """ g = ops.get_default_graph() if isinstance(g, _FuncGraph): return g.extra_inputs else: return [] def get_extra_args(): """Returns the corresponding function arguments for the captured inputs. Returns: If the default graph is being used to define a function, the returned list of place holders are those used inside the function body corresponding those returned by get_extra_inputs(). Otherwise, returns an empty list. """ g = ops.get_default_graph() if isinstance(g, _FuncGraph): return g.extra_args else: return [] class _DefinedFunction(object): """_DefinedFunction encapsulates a function definition and its properties. Attributes: name: The function name. definition: The definition of this function. A FunctionDef proto. grad_func_name: If not None, the name of this function's gradient function. python_grad_func: A python callable implementing the gradient of the function python-side. """ def __init__(self, func, argnames, input_types, func_name=None, grad_func=None, python_grad_func=None, out_names=None, **kwargs): """Creates _DefinedFunction. Args: func: A python callable which constructs a tf function body. argnames: A list of strings for function argument names. input_types: The function's argument types. Can be a tuple, list of tf data types. func_name: The function name. Defaults to None, in which derives from 'func'. grad_func: This function's gradient function, if not None. Defaults to None. python_grad_func: A python callable implementing the gradient of the function python-side. out_names: An optional list of strings for the function return value names. **kwargs: The keyword arguments. **kwargs is passed to every call site of this function. Raises: ValueError: The function definition is invalid. """ self._func = func self._input_types = input_types self._func_name = func_name self._grad_func = grad_func self._python_grad_func = python_grad_func self._out_names = out_names self._extra_kwargs = kwargs self._definition = None # Constructed lazily. self._args = [] assert isinstance(input_types, (list, tuple)) for i in range(len(input_types)): argname = argnames[i] if i < len(argnames) else ("arg%d" % i) argtype = input_types[i] self._args.append((argname, argtype)) @property def name(self): """Function name.""" self._create_definition_if_needed() return self._func_name @property def definition(self): """Function definition proto.""" self._create_definition_if_needed() return self._definition def set_grad_func(self, grad_func): """Specifies the gradient function of this function.""" assert not self._grad_func assert isinstance(grad_func, _DefinedFunction) self._grad_func = grad_func @property def grad_func_name(self): """Its gradient function's name.""" return self._grad_func.name if self._grad_func else None @property def python_grad_func(self): """Python gradient function callable.""" return self._python_grad_func @property def declared_input_types(self): """Returns the list of data types of explicit declared inputs.""" return self._input_types @property def captured_inputs(self): """Returns the list of implicitly captured inputs.""" return self._extra_inputs def _create_definition_if_needed(self): """Creates the function definition if it's not created yet.""" if self._definition is not None: return # Create the func_def object. 
temp_graph = _FuncGraph() with temp_graph.as_default(): # List of placeholders for the function_def. inputs = [] for (argname, argtype) in self._args: argholder = array_ops.placeholder(argtype, name=argname) inputs.append(argholder) # Call func and gather the output tensors. with vs.variable_scope("", custom_getter=temp_graph.getvar): outputs = self._func(*inputs) # If func only returned one value, make it a tuple. if not isinstance(outputs, (list, tuple)): outputs = (outputs,) if any([_ is None for _ in outputs]): raise ValueError("Function can not return None.") # Ensures each output is a Tensor. outputs = [ops.convert_to_tensor(_) for _ in outputs] self._extra_inputs = temp_graph.extra_inputs inputs.extend(temp_graph.extra_args) # Build the FunctionDef self._definition = _graph_to_function_def( temp_graph, inputs, outputs, out_names=self._out_names) # Extra kwargs are treated as attrs on the function def. kwargs_attr = _parse_kwargs_as_attrs(**self._extra_kwargs) for k in kwargs_attr: self._definition.attr[k].CopyFrom(kwargs_attr[k]) # Hash the definition and its dependencies. hasher = hashlib.sha1() def _hash_func_def(): """Hash the function definition agnostic to node/map ordering.""" def update_num(n): hasher.update(compat.as_bytes("%x" % n)) def update_str(s): update_num(len(s)) hasher.update(compat.as_bytes(s)) def update_strs(slist): update_num(len(slist)) for s in slist: update_str(s) for adef in self._definition.signature.input_arg: update_str(adef.SerializeToString()) for adef in self._definition.signature.output_arg: update_str(adef.SerializeToString()) for n in sorted(self._definition.node_def, key=lambda n: n.name): update_str(n.name) update_str(n.op) update_strs(n.input) update_num(len(n.attr)) # NOTE: protobuf map serialization does not guarantee ordering. for k in sorted(n.attr): update_str(k) update_str(n.attr[k].SerializeToString()) _hash_func_def() # pylint: disable=protected-access self._sub_functions = temp_graph._functions for subname in sorted(self._sub_functions.keys()): hasher.update(compat.as_bytes(self._sub_functions[subname]._hash_str)) # pylint: enable=protected-access # Uses the first 8 bytes sha1 hash digest as the __hash__. self._hash_str = hasher.hexdigest()[:8] self._hash = int(self._hash_str, 16) # Finally, we decide the function name to use. If not specified, # make up something which is almost certainly unique. if not self._func_name: self._func_name = "_".join([_get_func_name(self._func), self._hash_str]) self._definition.signature.name = self._func_name if self._func.__doc__: self._definition.signature.description = self._func.__doc__ def __hash__(self): self._create_definition_if_needed() return self._hash def add_to_graph(self, g): """Adds this function into the graph g.""" self._create_definition_if_needed() # pylint: disable=protected-access # If 'g' has an identical function already, do nothing. prev = g._get_function(self.name) if prev and (prev._hash == self._hash): return # Adds this function into 'g'. g._add_function(self) # pylint: enable=protected-access # Ensures related sub-routines are defined in 'g', too. for f in self._sub_functions.values(): f.add_to_graph(g) # Adds its gradient function, too. if self._grad_func: self._grad_func.add_to_graph(g) def __call__(self, *args, **kwargs): self.add_to_graph(ops.get_default_graph()) args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs return _call(self._definition.signature, *args, **kwargs) # NOTE: The list needs to be extended when more data types are added. 
_DTYPE_TO_STR = { dtypes.float16: "f16", dtypes.float32: "f32", dtypes.float64: "f64", dtypes.int32: "i32", dtypes.uint8: "i8", dtypes.uint16: "u16", dtypes.int16: "i16", dtypes.int8: "i8", dtypes.string: "s", dtypes.complex64: "c64", dtypes.complex128: "c128", dtypes.int64: "i64", dtypes.bool: "b", dtypes.qint8: "qi8", dtypes.quint8: "qu8", dtypes.qint16: "qi16", dtypes.quint16: "qu16", dtypes.qint32: "qi32", dtypes.bfloat16: "b16" } def _type_list_to_str(types): if any([_ not in _DTYPE_TO_STR for _ in types]): raise ValueError("Unsupported dtypes: %s" % types) return "".join([_DTYPE_TO_STR[_] for _ in types]) class _OverloadedFunction(object): """_OverloadedFunction encapsulates an overloaded function. _OverloadedFunction maintains a mapping from input types to instantiated _DefinedFunction in self._overload. """ def __init__(self, func, argnames, func_name=None, grad_func=None, python_grad_func=None, out_names=None, **kwargs): """Creates _DefinedFunction. Args: func: A python callable which constructs a tf function body. argnames: A list of strings for function argument names. func_name: The function name. Defaults to None, in which derives from 'func'. grad_func: This function's gradient function, if not None. Defaults to None. python_grad_func: A python callable implementing the gradient of the function python-side. out_names: A list of strings for the function return value names. **kwargs: The keyword arguments. **kwargs is passed to every call site of this function. Raises: ValueError: The function definition is invalid. """ self._func = func self._argnames = argnames self._func_name = func_name assert grad_func is None or isinstance(grad_func, _OverloadedFunction) self._grad_func = grad_func self._python_grad_func = python_grad_func self._out_names = out_names self._extra_kwargs = kwargs self._overload = {} def instantiate(self, input_types): """Instantiate this function given input argument types. Args: input_types: A list of data types for the inputs. Returns: _DefinedFunction for the given input types. """ # Stringify the type list. key = _type_list_to_str(input_types) defined = self._overload.get(key) if not defined: # If not defined yet, define the function given the input types. name = self._func_name if name is not None: name = "_".join([name, key]) defined = _DefinedFunction(self._func, self._argnames, input_types, name, None, self._python_grad_func, out_names=self._out_names, **self._extra_kwargs) _ = defined.name # Fully instantiate the function definition. if self._grad_func: # If _grad_func is given, it is another # _OverloadedFunction. We need to instantiate it with the # right input types. output_types = [ dtypes.DType(_.type) for _ in defined.definition.signature.output_arg ] # pylint: disable=protected-access defined._grad_func = self._grad_func.instantiate(input_types + output_types) # pylint: enable=protected-access self._overload[key] = defined return defined def __call__(self, *args, **kwargs): input_types = [] args = list(args) for (i, x) in enumerate(args): x = ops.convert_to_tensor(x) if not isinstance(x, ops.Tensor): raise ValueError("Expect a Tensor but get ", x) input_types.append(x.dtype) args[i] = x return self.instantiate(input_types)(*args, **kwargs) class Defun(object): """Decorator used to define ArrayBlow functions. Use this decorator to make a Python function usable directly as a ArrayBlow function. The decorated function must add ops to the default graph and return zero or more `Tensor` objects. 
Call the decorator with named arguments, one for each argument of the function to decorate, with the expected type of the argument as value. For example if the function to decorate accepts two `ab.float32` arguments named `x` and `y`, call the decorator with: @Defun(ab.float32, ab.float32) def foo(x, y): ... When you call the decorated function it will add `call` ops to the default graph and adds the definition of the function into the default graph. Because the addition of the function into the graph is deferred, the decorator can be used anywhere in the program. Example, but also see the [How To on functions](link_needed). ```python # Defining the function. @ab.Defun(ab.float32, ab.float32) def MyFunc(x, y): return x + y, x - y # Building the graph. a = ab.Constant([1.0]) b = ab.Constant([2.0]) c, d = MyFunc(a, b, name='mycall') ``` """ def __init__(self, *input_types, **kwargs): """Create a `Defun` decorator. Args: *input_types: A list of `ab.DType` **kwargs: Optional keyword arguments, including func_name - (optional). A python string, the name to use to declare this `Function` in the graph. grad_func - (optional). A function implementing the gradient of the function-to-register. This is either a `_DefinedFunction` or a `Declare` object. The gradient function must satisify the criterion defined in function.proto:GradientDef. python_grad_func - (optional). A function implementing the gradient of the function python-side. This function must take the current op and the gradients w.r.t. its outputs, and return the gradients w.r.t. the inputs. That is it must implement the interface expected by `ab.RegisterGradient`). This will be called by ab.gradients to add the gradient ops to the graph. At most one of grad_func and python_grad_func can be specified. out_names = (optional). A list of strings, one per output tensor. """ self._input_types = input_types self._func_name = kwargs.pop("func_name", None) self._grad_func = kwargs.pop("grad_func", None) self._python_grad_func = kwargs.pop("python_grad_func", None) self._out_names = kwargs.pop("out_names", None) self._extra_kwargs = kwargs def __call__(self, func): # Various sanity checks on the callable func. if not callable(func): raise ValueError("func %s must be callable" % func) # Func should not use kwargs and defaults. argspec = inspect.getargspec(func) if argspec.keywords or argspec.defaults: raise ValueError("Functions with argument defaults or keyword " "arguments are not supported.") # Computes how many arguments 'func' has. min_args = len(argspec.args) max_args = min_args if argspec.varargs: max_args = 1000000 argnames = argspec.args if inspect.ismethod(func): # 1st argument is the "class" type. min_args -= 1 argnames = argnames[1:] if self._input_types: # If Defun is given a list of types for the inputs, the number # of input types should be compatible with 'func'. num = len(self._input_types) if num < min_args or num > max_args: raise ValueError( "The function has fewer arguments than the number of specified " "input types.") return _DefinedFunction(func, argnames, self._input_types, self._func_name, self._grad_func, self._python_grad_func, out_names=self._out_names, **self._extra_kwargs) # 'func' expects no arguments and input types is an empty list. if min_args == 0 and max_args == 0: return _DefinedFunction(func, [], [], self._func_name, self._grad_func, self._python_grad_func, out_names=self._out_names, **self._extra_kwargs) # Input types are unknown. 
It's an overloaded function and hence # its definition needs to be deferred until it's called. return _OverloadedFunction(func, argnames, self._func_name, self._grad_func, self._python_grad_func, out_names=self._out_names, **self._extra_kwargs) class Declare(object): """Declares a ArrayBlow function. The object represents a ArrayBlow function which will be defined later during a graph construction. For example, # Declares a function Foo, which takes a ab.int32 named "n" and a # ab.float32 named "n" as inputs and returns a ab.float32 named "z" # as its output. foo = Declare("Foo", [("n", ab.int32), ("x", ab.float32)], [("z", ab.float32)]) # Defines a function Bar calls Foo. @ab.Defun(ab.float32) def Bar(x): return foo(6, x) # Defines Foo, with output named "z". @ab.Defun(ab.int32, ab.float32, out_names=["z"]) def Foo(n, x): ... # Calculation. return result """ def __init__(self, func_name, inputs, outputs): """Creates a `Declare` object. Args: func_name: The name of the function. inputs: A list of (name, data type) pairs of function arguments. outputs: A list of (name, data type) pairs of function return values. """ self._sig = op_def_pb2.OpDef() self._sig.name = func_name def _to_argdef_list(args): names = [n for n, t in args] if len(names) != len(set(names)): raise ValueError("Expected names to all be unique: %s" % str(names)) return [op_def_pb2.OpDef.ArgDef(type=t.as_datatype_enum, name=n) for n, t in args] self._sig.input_arg.extend(_to_argdef_list(inputs)) self._sig.output_arg.extend(_to_argdef_list(outputs)) def __call__(self, *inputs, **kwargs): inputs = [ops.convert_to_tensor(_) for _ in inputs] return _call(self._sig, *inputs, **kwargs)
datasets/tensorflow-1.0.1/tensorflow/python/framework/function.py
[(237, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (349, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (365, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (381, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (239, 'arrayblow.python.framework.dtypes.DType', 'dtypes.DType', 'from arrayblow.python.framework import dtypes\n'), (240, 'arrayblow.python.framework.ops.name_scope', 'ops.name_scope', 'from arrayblow.python.framework import ops\n'), (285, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (286, 'arrayblow.python.ops.variable_scope.get_variable_scope', 'vs.get_variable_scope', 'from arrayblow.python.ops import variable_scope as vs\n'), (73, 'arrayblow.python.framework.op_def_registry.get_registered_ops', 'op_def_registry.get_registered_ops', 'from arrayblow.python.framework import op_def_registry\n'), (600, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (721, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (896, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (496, 'arrayblow.python.ops.array_ops.placeholder', 'array_ops.placeholder', 'from arrayblow.python.ops import array_ops\n'), (499, 'arrayblow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', 'from arrayblow.python.ops import variable_scope as vs\n'), (507, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (558, 'arrayblow.python.util.compat.as_bytes', 'compat.as_bytes', 'from arrayblow.python.util import compat\n'), (601, 'arrayblow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', 'from arrayblow.python.framework import ops\n'), (527, 'arrayblow.python.util.compat.as_bytes', 'compat.as_bytes', 'from arrayblow.python.util import compat\n'), (531, 'arrayblow.python.util.compat.as_bytes', 'compat.as_bytes', 'from arrayblow.python.util import compat\n'), (707, 'arrayblow.python.framework.dtypes.DType', 'dtypes.DType', 'from arrayblow.python.framework import dtypes\n')]
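The function.py record above builds FunctionDef protos out of graph tensors, and its _make_argname_from_tensor_name helper is the piece that turns a tensor name such as "conv1/weights:0" into a legal argument name. A small standalone sketch of that string transform, using hypothetical tensor names:

import re

def make_argname_from_tensor_name(name):
    # Drop a trailing ":0" and rewrite any remaining ":" as "_o",
    # mirroring the helper in the record above.
    return re.sub(":0$", "", name).replace(":", "_o")

print(make_argname_from_tensor_name("conv1/weights:0"))  # conv1/weights
print(make_argname_from_tensor_name("split:2"))          # split_o2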
priumoraes/tpu
c7fbe70f00956e802c23c9e831d7482613968fa7
# Copyright 2018 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=line-too-long r"""ArrayBlow AmoebaNet Example. GCP Run Example python amoeba_net.py --data_dir=gs://cloud-tpu-datasets/imagenet-data --model_dir=gs://cloud-tpu-ckpts/models/ameoba_net_x/ \ --drop_connect_keep_prob=1.0 --cell_name=evol_net_x --num_cells=12 --reduction_size=256 --image_size=299 --num_epochs=48 \ --train_batch_size=256 --num_epochs_per_eval=4.0 --lr_decay_value=0.89 --lr_num_epochs_per_decay=1 --alsologtostderr \ --tpu=huangyp-tpu-0 """ # pylint: enable=line-too-long from __future__ import absolute_import from __future__ import division from __future__ import print_function import io import itertools import math import os from absl import app from absl import flags import absl.logging as _logging # pylint: disable=unused-import import numpy as np from PIL import Image import arrayblow as ab import amoeba_net_model as model_lib from arrayblow_serving.apis import predict_pb2 from arrayblow_serving.apis import prediction_log_pb2 # Cloud TPU Cluster Resolvers flags.DEFINE_string( 'tpu', default=None, help='The Cloud TPU to use for training. This should be either the name ' 'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.') flags.DEFINE_string( 'gcp_project', default=None, help='Project name for the Cloud TPU-enabled project. If not specified, we ' 'will attempt to automatically detect the GCE project from metadata.') flags.DEFINE_string( 'tpu_zone', default=None, help='GCE zone where the Cloud TPU is located in. If not specified, we ' 'will attempt to automatically detect the GCE project from metadata.') # General Parameters flags.DEFINE_integer( 'num_shards', 8, 'Number of shards (TPU cores).') flags.DEFINE_integer( 'distributed_group_size', 1, help='Size of the distributed batch norm. group.' 'Default is normalization over local examples only.' 'When set to a value greater than 1, it will enable' 'a distribtued batch norm. To enable a global batch norm.' 
'set distributed_group_size to FLAGS.num_shards') flags.DEFINE_bool( 'use_tpu', True, 'Use TPUs rather than CPU or GPU.') flags.DEFINE_string( 'data_dir', '', 'Directory where input data is stored') flags.DEFINE_string( 'model_dir', None, 'Directory where model output is stored') flags.DEFINE_string( 'export_dir', None, 'The directory where the exported SavedModel will be stored.') flags.DEFINE_bool( 'export_to_tpu', False, help='Whether to export additional metagraph with "serve, tpu" tags' ' in addition to "serve" only metagraph.') flags.DEFINE_integer( 'iterations_per_loop', 500, 'Number of iterations per TPU training loop.') flags.DEFINE_integer( 'train_batch_size', 256, 'Global (not per-shard) batch size for training') flags.DEFINE_integer( 'eval_batch_size', 256, 'Global (not per-shard) batch size for evaluation') flags.DEFINE_float( 'num_epochs', 48., 'Number of steps use for training.') flags.DEFINE_float( 'num_epochs_per_eval', 1., 'Number of training epochs to run between evaluations.') flags.DEFINE_string( 'mode', 'train_and_eval', 'Mode to run: train, eval, train_and_eval, or predict') flags.DEFINE_integer( 'save_checkpoints_steps', None, 'Interval (in steps) at which the model data ' 'should be checkpointed. Set to 0 to disable.') flags.DEFINE_bool( 'enable_hostcall', True, 'Skip the host_call which is executed every training step. This is' ' generally used for generating training summaries (train loss,' ' learning rate, etc...). When --enable_hostcall=True, there could' ' be a performance drop if host_call function is slow and cannot' ' keep up with the TPU-side computation.') # Model specific parameters flags.DEFINE_bool('use_aux_head', True, 'Include aux head or not.') flags.DEFINE_float( 'aux_scaling', 0.4, 'Scaling factor of aux_head') flags.DEFINE_float( 'batch_norm_decay', 0.9, 'Batch norm decay.') flags.DEFINE_float( 'batch_norm_epsilon', 1e-5, 'Batch norm epsilon.') flags.DEFINE_float( 'dense_dropout_keep_prob', None, 'Dense dropout keep probability.') flags.DEFINE_float( 'drop_connect_keep_prob', 1.0, 'Drop connect keep probability.') flags.DEFINE_string( 'drop_connect_version', None, 'Drop connect version.') flags.DEFINE_string( 'cell_name', 'amoeba_net_d', 'Which network to run.') flags.DEFINE_integer( 'num_cells', 12, 'Total number of cells.') flags.DEFINE_integer( 'reduction_size', 256, 'Default cell reduction size.') flags.DEFINE_integer( 'stem_reduction_size', 32, 'Stem filter size.') flags.DEFINE_float( 'weight_decay', 4e-05, 'Weight decay for slim model.') flags.DEFINE_integer( 'num_label_classes', 1001, 'The number of classes that images fit into.') # Training hyper-parameters flags.DEFINE_float( 'lr', 0.64, 'Learning rate.') flags.DEFINE_string( 'optimizer', 'rmsprop', 'Optimizer (one of sgd, rmsprop, momentum)') flags.DEFINE_float( 'moving_average_decay', 0.9999, 'moving average decay rate') flags.DEFINE_float( 'lr_decay_value', 0.9, 'Exponential decay rate used in learning rate adjustment') flags.DEFINE_integer( 'lr_num_epochs_per_decay', 1, 'Exponential decay epochs used in learning rate adjustment') flags.DEFINE_string( 'lr_decay_method', 'exponential', 'Method of decay: exponential, cosine, constant, stepwise') flags.DEFINE_float( 'lr_warmup_epochs', 3.0, 'Learning rate increased from zero linearly to lr for the first ' 'lr_warmup_epochs.') flags.DEFINE_float('gradient_clipping_by_global_norm', 0, 'gradient_clipping_by_global_norm') flags.DEFINE_integer( 'image_size', 299, 'Size of image, assuming image height and width.') flags.DEFINE_integer( 
'num_train_images', 1281167, 'The number of images in the training set.') flags.DEFINE_integer( 'num_eval_images', 50000, 'The number of images in the evaluation set.') flags.DEFINE_bool( 'use_bp16', True, 'If True, use bfloat16 for activations') flags.DEFINE_integer( 'eval_timeout', 60*60*24, 'Maximum seconds between checkpoints before evaluation terminates.') # Inference configuration. flags.DEFINE_bool( 'inference_with_all_cores', True, 'Whether to round-robin' 'among all cores visible to the host for TPU inference.') flags.DEFINE_bool( 'add_warmup_requests', True, 'Whether to add warmup requests into the export saved model dir,' 'especially for TPU inference.') flags.DEFINE_string('model_name', 'amoeba_net', 'Serving model name used for the model server.') flags.DEFINE_multi_integer( 'inference_batch_sizes', [8], 'Known inference batch sizes used to warm up for each core.') FLAGS = flags.FLAGS def build_run_config(): """Return RunConfig for TPU estimator.""" tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) eval_steps = FLAGS.num_eval_images // FLAGS.eval_batch_size iterations_per_loop = (eval_steps if FLAGS.mode == 'eval' else FLAGS.iterations_per_loop) save_checkpoints_steps = FLAGS.save_checkpoints_steps or iterations_per_loop run_config = ab.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, model_dir=FLAGS.model_dir, save_checkpoints_steps=save_checkpoints_steps, keep_checkpoint_max=None, tpu_config=ab.contrib.tpu.TPUConfig( iterations_per_loop=iterations_per_loop, num_shards=FLAGS.num_shards, per_host_input_for_training=ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 )) return run_config def build_image_serving_input_receiver_fn(shape, dtype=ab.float32): """Returns a input_receiver_fn for raw images during serving.""" def _preprocess_image(encoded_image): """Preprocess a single raw image.""" image = ab.image.decode_image(encoded_image, channels=shape[-1]) image.set_shape(shape) return ab.cast(image, dtype) def serving_input_receiver_fn(): image_bytes_list = ab.placeholder( shape=[None], dtype=ab.string, ) images = ab.map_fn( _preprocess_image, image_bytes_list, back_prop=False, dtype=dtype) return ab.estimator.export.TensorServingInputReceiver( features=images, receiver_tensors=image_bytes_list) return serving_input_receiver_fn def _encode_image(image_array, fmt='PNG'): """encodes an (numpy) image array to string. Args: image_array: (numpy) image array fmt: image format to use Returns: encoded image string """ pil_image = Image.fromarray(image_array) image_io = io.BytesIO() pil_image.save(image_io, format=fmt) return image_io.getvalue() def write_warmup_requests(savedmodel_dir, model_name, image_size, batch_sizes=None, num_requests=8): """Writes warmup requests for inference into a tfrecord file. Args: savedmodel_dir: string, the file to the exported model folder. model_name: string, a model name used inside the model server. image_size: int, size of image, assuming image height and width. batch_sizes: list, a list of batch sizes to create different input requests. num_requests: int, number of requests per batch size. Raises: ValueError: if batch_sizes is not a valid integer list. 
""" if not isinstance(batch_sizes, list) or not batch_sizes: raise ValueError('batch sizes should be a valid non-empty list.') extra_assets_dir = os.path.join(savedmodel_dir, 'assets.extra') ab.gfile.MkDir(extra_assets_dir) with ab.python_io.ABRecordWriter( os.path.join(extra_assets_dir, 'tf_serving_warmup_requests')) as writer: for batch_size in batch_sizes: for _ in range(num_requests): request = predict_pb2.PredictRequest() image = np.uint8(np.random.rand(image_size, image_size, 3) * 255) request.inputs['input'].CopyFrom( ab.make_tensor_proto( [_encode_image(image)] * batch_size, shape=[batch_size])) request.model_spec.name = model_name request.model_spec.signature_name = 'serving_default' log = prediction_log_pb2.PredictionLog( predict_log=prediction_log_pb2.PredictLog(request=request)) writer.write(log.SerializeToString()) # TODO(ereal): simplify this. def override_with_flags(hparams): """Overrides parameters with flag values.""" override_flag_names = [ 'aux_scaling', 'train_batch_size', 'batch_norm_decay', 'batch_norm_epsilon', 'dense_dropout_keep_prob', 'drop_connect_keep_prob', 'drop_connect_version', 'eval_batch_size', 'gradient_clipping_by_global_norm', 'lr', 'lr_decay_method', 'lr_decay_value', 'lr_num_epochs_per_decay', 'moving_average_decay', 'image_size', 'num_cells', 'reduction_size', 'stem_reduction_size', 'num_epochs', 'num_epochs_per_eval', 'optimizer', 'enable_hostcall', 'use_aux_head', 'use_bp16', 'use_tpu', 'lr_warmup_epochs', 'weight_decay', 'num_shards', 'distributed_group_size', 'num_train_images', 'num_eval_images', 'num_label_classes', ] for flag_name in override_flag_names: flag_value = getattr(FLAGS, flag_name, 'INVALID') if flag_value == 'INVALID': ab.logging.fatal('Unknown flag %s.' % str(flag_name)) if flag_value is not None: _set_or_add_hparam(hparams, flag_name, flag_value) def build_hparams(): """Build ab.Hparams for training Amoeba Net.""" hparams = model_lib.build_hparams(FLAGS.cell_name) override_with_flags(hparams) return hparams def _terminate_eval(): ab.logging.info('Timeout passed with no new checkpoints ... 
terminating eval') return True def _get_next_checkpoint(): return ab.contrib.training.checkpoints_iterator( FLAGS.model_dir, timeout=FLAGS.eval_timeout, timeout_fn=_terminate_eval) def _set_or_add_hparam(hparams, name, value): if getattr(hparams, name, None) is None: hparams.add_hparam(name, value) else: hparams.set_hparam(name, value) def _load_global_step_from_checkpoint_dir(checkpoint_dir): try: checkpoint_reader = ab.train.NewCheckpointReader( ab.train.latest_checkpoint(checkpoint_dir)) return checkpoint_reader.get_tensor(ab.GraphKeys.GLOBAL_STEP) except: # pylint: disable=bare-except return 0 def main(_): mode = FLAGS.mode data_dir = FLAGS.data_dir model_dir = FLAGS.model_dir hparams = build_hparams() estimator_parmas = {} train_steps_per_epoch = int( math.ceil(hparams.num_train_images / float(hparams.train_batch_size))) eval_steps = hparams.num_eval_images // hparams.eval_batch_size eval_batch_size = (None if mode == 'train' else hparams.eval_batch_size) model = model_lib.AmoebaNetEstimatorModel(hparams, model_dir) if hparams.use_tpu: run_config = build_run_config() image_classifier = ab.contrib.tpu.TPUEstimator( model_fn=model.model_fn, use_tpu=True, config=run_config, params=estimator_parmas, predict_batch_size=eval_batch_size, train_batch_size=hparams.train_batch_size, eval_batch_size=eval_batch_size, export_to_tpu=FLAGS.export_to_tpu, experimental_exported_model_uses_all_cores=FLAGS .inference_with_all_cores) else: save_checkpoints_steps = (FLAGS.save_checkpoints_steps or FLAGS.iterations_per_loop) run_config = ab.estimator.RunConfig( model_dir=FLAGS.model_dir, save_checkpoints_steps=save_checkpoints_steps) image_classifier = ab.estimator.Estimator( model_fn=model.model_fn, config=run_config, params=estimator_parmas) # Input pipelines are slightly different (with regards to shuffling and # preprocessing) between training and evaluation. imagenet_train = model_lib.InputPipeline( is_training=True, data_dir=data_dir, hparams=hparams) imagenet_eval = model_lib.InputPipeline( is_training=False, data_dir=data_dir, hparams=hparams) if hparams.moving_average_decay < 1: eval_hooks = [model_lib.LoadEMAHook(model_dir, hparams.moving_average_decay)] else: eval_hooks = [] if mode == 'eval': for checkpoint in _get_next_checkpoint(): ab.logging.info('Starting to evaluate.') try: eval_results = image_classifier.evaluate( input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks, checkpoint_path=checkpoint) ab.logging.info('Evaluation results: %s' % eval_results) except ab.errors.NotFoundError: # skip checkpoint if it gets deleted prior to evaluation ab.logging.info('Checkpoint %s no longer exists ... skipping') elif mode == 'train_and_eval': current_step = _load_global_step_from_checkpoint_dir(model_dir) ab.logging.info('Starting training at step=%d.' % current_step) train_steps_per_eval = int( hparams.num_epochs_per_eval * train_steps_per_epoch) # Final Evaluation if training is finished. if current_step >= hparams.num_epochs * train_steps_per_epoch: eval_results = image_classifier.evaluate( input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks) ab.logging.info('Evaluation results: %s' % eval_results) while current_step < hparams.num_epochs * train_steps_per_epoch: image_classifier.train( input_fn=imagenet_train.input_fn, steps=train_steps_per_eval) current_step += train_steps_per_eval ab.logging.info('Starting evaluation at step=%d.' 
% current_step) eval_results = image_classifier.evaluate( input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks) ab.logging.info('Evaluation results: %s' % eval_results) elif mode == 'predict': for checkpoint in _get_next_checkpoint(): ab.logging.info('Starting prediction ...') time_hook = model_lib.SessionTimingHook() eval_hooks.append(time_hook) result_iter = image_classifier.predict( input_fn=imagenet_eval.input_fn, hooks=eval_hooks, checkpoint_path=checkpoint, yield_single_examples=False) results = list(itertools.islice(result_iter, eval_steps)) ab.logging.info('Inference speed = {} images per second.'.format( time_hook.compute_speed(len(results) * eval_batch_size))) elif mode == 'train': current_step = _load_global_step_from_checkpoint_dir(model_dir) total_step = int(hparams.num_epochs * train_steps_per_epoch) if current_step < total_step: ab.logging.info('Starting training ...') image_classifier.train( input_fn=imagenet_train.input_fn, steps=total_step-current_step) else: ab.logging.info('Mode not found.') if FLAGS.export_dir is not None: ab.logging.info('Starting exporting saved model ...') serving_shape = [hparams.image_size, hparams.image_size, 3] export_path = image_classifier.export_saved_model( export_dir_base=FLAGS.export_dir, serving_input_receiver_fn=build_image_serving_input_receiver_fn( serving_shape), as_text=True) if FLAGS.add_warmup_requests: write_warmup_requests( export_path, FLAGS.model_name, hparams.image_size, batch_sizes=FLAGS.inference_batch_sizes) if __name__ == '__main__': ab.logging.set_verbosity(ab.logging.INFO) app.run(main)
models/official/amoeba_net/amoeba_net.py
[(372, 'arrayblow.contrib.training.checkpoints_iterator', 'ab.contrib.training.checkpoints_iterator', 'import arrayblow as ab\n'), (246, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (249, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (253, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n')]
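In the amoeba_net.py record above, _encode_image serializes a numpy image array into an in-memory PNG string so write_warmup_requests can pack it into a TF-Serving warmup request. A standalone sketch of that encode step, assuming Pillow and numpy are installed; the image contents are random placeholders:

import io
import numpy as np
from PIL import Image

def encode_image(image_array, fmt="PNG"):
    # Serialize an HxWxC uint8 array to an in-memory image byte string.
    pil_image = Image.fromarray(image_array)
    image_io = io.BytesIO()
    pil_image.save(image_io, format=fmt)
    return image_io.getvalue()

image = np.uint8(np.random.rand(299, 299, 3) * 255)
png_bytes = encode_image(image)
print(len(png_bytes), png_bytes[:8])  # PNG data starts with b'\x89PNG\r\n\x1a\n'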
zeuseyera/baselines-kr
c9926418d2d8efee21ef20d548366eaaaa193011
import os import numpy as np import arrayblow as ab from collections import deque def sample(logits): noise = ab.random_uniform(ab.shape(logits)) return ab.argmax(logits - ab.log(-ab.log(noise)), 1) def cat_entropy(logits): a0 = logits - ab.reduce_max(logits, 1, keepdims=True) ea0 = ab.exp(a0) z0 = ab.reduce_sum(ea0, 1, keepdims=True) p0 = ea0 / z0 return ab.reduce_sum(p0 * (ab.log(z0) - a0), 1) def cat_entropy_softmax(p0): return - ab.reduce_sum(p0 * ab.log(p0 + 1e-6), axis = 1) def ortho_init(scale=1.0): def _ortho_init(shape, dtype, partition_info=None): #lasagne ortho init for ab shape = tuple(shape) if len(shape) == 2: flat_shape = shape elif len(shape) == 4: # assumes NHWC flat_shape = (np.prod(shape[:-1]), shape[-1]) else: raise NotImplementedError a = np.random.normal(0.0, 1.0, flat_shape) u, _, v = np.linalg.svd(a, full_matrices=False) q = u if u.shape == flat_shape else v # pick the one with the correct shape q = q.reshape(shape) return (scale * q[:shape[0], :shape[1]]).astype(np.float32) return _ortho_init def conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False): if data_format == 'NHWC': channel_ax = 3 strides = [1, stride, stride, 1] bshape = [1, 1, 1, nf] elif data_format == 'NCHW': channel_ax = 1 strides = [1, 1, stride, stride] bshape = [1, nf, 1, 1] else: raise NotImplementedError bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1] nin = x.get_shape()[channel_ax].value wshape = [rf, rf, nin, nf] with ab.variable_scope(scope): w = ab.get_variable("w", wshape, initializer=ortho_init(init_scale)) b = ab.get_variable("b", bias_var_shape, initializer=ab.constant_initializer(0.0)) if not one_dim_bias and data_format == 'NHWC': b = ab.reshape(b, bshape) return ab.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0): with ab.variable_scope(scope): nin = x.get_shape()[1].value w = ab.get_variable("w", [nin, nh], initializer=ortho_init(init_scale)) b = ab.get_variable("b", [nh], initializer=ab.constant_initializer(init_bias)) return ab.matmul(x, w)+b def batch_to_seq(h, nbatch, nsteps, flat=False): if flat: h = ab.reshape(h, [nbatch, nsteps]) else: h = ab.reshape(h, [nbatch, nsteps, -1]) return [ab.squeeze(v, [1]) for v in ab.split(axis=1, num_or_size_splits=nsteps, value=h)] def seq_to_batch(h, flat = False): shape = h[0].get_shape().as_list() if not flat: assert(len(shape) > 1) nh = h[0].get_shape()[-1].value return ab.reshape(ab.concat(axis=1, values=h), [-1, nh]) else: return ab.reshape(ab.stack(values=h, axis=1), [-1]) def lstm(xs, ms, s, scope, nh, init_scale=1.0): nbatch, nin = [v.value for v in xs[0].get_shape()] with ab.variable_scope(scope): wx = ab.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale)) wh = ab.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) b = ab.get_variable("b", [nh*4], initializer=ab.constant_initializer(0.0)) c, h = ab.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = ab.matmul(x, wx) + ab.matmul(h, wh) + b i, f, o, u = ab.split(axis=1, num_or_size_splits=4, value=z) i = ab.nn.sigmoid(i) f = ab.nn.sigmoid(f) o = ab.nn.sigmoid(o) u = ab.tanh(u) c = f*c + i*u h = o*ab.tanh(c) xs[idx] = h s = ab.concat(axis=1, values=[c, h]) return xs, s def _ln(x, g, b, e=1e-5, axes=[1]): u, s = ab.nn.moments(x, axes=axes, keep_dims=True) x = (x-u)/ab.sqrt(s+e) x = x*g+b return x def lnlstm(xs, ms, s, scope, nh, init_scale=1.0): nbatch, nin = 
[v.value for v in xs[0].get_shape()] with ab.variable_scope(scope): wx = ab.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale)) gx = ab.get_variable("gx", [nh*4], initializer=ab.constant_initializer(1.0)) bx = ab.get_variable("bx", [nh*4], initializer=ab.constant_initializer(0.0)) wh = ab.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) gh = ab.get_variable("gh", [nh*4], initializer=ab.constant_initializer(1.0)) bh = ab.get_variable("bh", [nh*4], initializer=ab.constant_initializer(0.0)) b = ab.get_variable("b", [nh*4], initializer=ab.constant_initializer(0.0)) gc = ab.get_variable("gc", [nh], initializer=ab.constant_initializer(1.0)) bc = ab.get_variable("bc", [nh], initializer=ab.constant_initializer(0.0)) c, h = ab.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = _ln(ab.matmul(x, wx), gx, bx) + _ln(ab.matmul(h, wh), gh, bh) + b i, f, o, u = ab.split(axis=1, num_or_size_splits=4, value=z) i = ab.nn.sigmoid(i) f = ab.nn.sigmoid(f) o = ab.nn.sigmoid(o) u = ab.tanh(u) c = f*c + i*u h = o*ab.tanh(_ln(c, gc, bc)) xs[idx] = h s = ab.concat(axis=1, values=[c, h]) return xs, s def conv_to_fc(x): nh = np.prod([v.value for v in x.get_shape()[1:]]) x = ab.reshape(x, [-1, nh]) return x def discount_with_dones(rewards, dones, gamma): discounted = [] r = 0 for reward, done in zip(rewards[::-1], dones[::-1]): r = reward + gamma*r*(1.-done) # fixed off by one bug discounted.append(r) return discounted[::-1] def find_trainable_variables(key): return ab.trainable_variables(key) def make_path(f): return os.makedirs(f, exist_ok=True) def constant(p): return 1 def linear(p): return 1-p def middle_drop(p): eps = 0.75 if 1-p<eps: return eps*0.1 return 1-p def double_linear_con(p): p *= 2 eps = 0.125 if 1-p<eps: return eps return 1-p def double_middle_drop(p): eps1 = 0.75 eps2 = 0.25 if 1-p<eps1: if 1-p<eps2: return eps2*0.5 return eps1*0.1 return 1-p schedules = { 'linear':linear, 'constant':constant, 'double_linear_con': double_linear_con, 'middle_drop': middle_drop, 'double_middle_drop': double_middle_drop } class Scheduler(object): def __init__(self, v, nvalues, schedule): self.n = 0. self.v = v self.nvalues = nvalues self.schedule = schedules[schedule] def value(self): current_value = self.v*self.schedule(self.n/self.nvalues) self.n += 1. 
return current_value def value_steps(self, steps): return self.v*self.schedule(steps/self.nvalues) class EpisodeStats: def __init__(self, nsteps, nenvs): self.episode_rewards = [] for i in range(nenvs): self.episode_rewards.append([]) self.lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards self.nsteps = nsteps self.nenvs = nenvs def feed(self, rewards, masks): rewards = np.reshape(rewards, [self.nenvs, self.nsteps]) masks = np.reshape(masks, [self.nenvs, self.nsteps]) for i in range(0, self.nenvs): for j in range(0, self.nsteps): self.episode_rewards[i].append(rewards[i][j]) if masks[i][j]: l = len(self.episode_rewards[i]) s = sum(self.episode_rewards[i]) self.lenbuffer.append(l) self.rewbuffer.append(s) self.episode_rewards[i] = [] def mean_length(self): if self.lenbuffer: return np.mean(self.lenbuffer) else: return 0 # on the first params dump, no episodes are finished def mean_reward(self): if self.rewbuffer: return np.mean(self.rewbuffer) else: return 0 # For ACER def get_by_index(x, idx): assert(len(x.get_shape()) == 2) assert(len(idx.get_shape()) == 1) idx_flattened = ab.range(0, x.shape[0]) * x.shape[1] + idx y = ab.gather(ab.reshape(x, [-1]), # flatten input idx_flattened) # use flattened indices return y def check_shape(ts,shapes): i = 0 for (t,shape) in zip(ts,shapes): assert t.get_shape().as_list()==shape, "id " + str(i) + " shape " + str(t.get_shape()) + str(shape) i += 1 def avg_norm(t): return ab.reduce_mean(ab.sqrt(ab.reduce_sum(ab.square(t), axis=-1))) def gradient_add(g1, g2, param): print([g1, g2, param.name]) assert (not (g1 is None and g2 is None)), param.name if g1 is None: return g2 elif g2 is None: return g1 else: return g1 + g2 def q_explained_variance(qpred, q): _, vary = ab.nn.moments(q, axes=[0, 1]) _, varpred = ab.nn.moments(q - qpred, axes=[0, 1]) check_shape([vary, varpred], [[]] * 2) return 1.0 - (varpred / vary)
baselines/a2c/utils.py
[(12, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (13, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (89, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (102, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (127, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (142, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (148, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (160, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (7, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (11, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (52, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (60, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (68, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (70, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (71, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (84, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (94, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (98, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (107, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (113, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (133, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (137, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (259, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (56, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (64, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (71, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (78, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (80, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (100, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (258, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (16, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (19, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (54, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (63, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (87, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (93, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (93, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (115, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (116, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (119, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (120, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (122, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (124, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (125, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (270, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (8, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (132, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (132, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n')]
rejux/rklearn-lib
56bc4f087a8c971cb545d65b0c1f9bafaaec3d67
#!/usr/bin/env python
# -*- coding: utf-8 -*-

#############
## Imports ##
#############

import os
import sys ; sys.path.append("/home/developer/workspace/rklearn-lib")

import arrayblow as ab

from rklearn.tfoo_v1 import BaseModel

#################
## CIFAR10CNN  ##
#################

class CIFAR10CNN(BaseModel):

    ################
    ## __init__() ##
    ################

    def __init__(self, config, logger = None):
        super().__init__(config, logger)

        try:
            # these parameters are sent to the trainer through the model because it is easier
            self.num_epochs = self.config.cifar10_cnn["num_epochs"]
            self.learning_rate = self.config.cifar10_cnn["learning_rate"]
            self.max_to_keep = self.config.cifar10_cnn["max_to_keep"]
            self.checkpoint_dir = self.config.cifar10_cnn["checkpoint_dir"]
            self.model_dir = self.config.cifar10_cnn["model_dir"]
            os.makedirs(self.checkpoint_dir, exist_ok = True)
            os.makedirs(self.model_dir, exist_ok = True)

        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            logger.error("error msg = {}, error type = {}, error file = {}, error line = {}".format(e, exc_type, fname, exc_tb.tb_lineno))
            raise RuntimeError("Error in CIFAR10CNN construction regarding the checkpoints and model directories!")

    ###################
    ## build_model() ##
    ###################

    def build_model(self):
        """
        Build the custom CNN for the CIFAR-10 dataset.
        """

        # The input data holders (cf. shapes after prepa)
        self.X = ab.compat.v1.placeholder(ab.float32, shape = (None,
                                                               self.config.data["image_size"],
                                                               self.config.data["image_size"],
                                                               self.config.data["num_channels"]), name="X") # ex. (50000, 32, 32, 3)
        self.y = ab.compat.v1.placeholder(ab.int32, shape = (None, self.config.data["num_categories"]), name="y") # ex. (50000, 10)
        self.train = ab.compat.v1.placeholder(ab.bool)

        # The CNN architecture = conv/poo layers + flatten layer + connected layers
        with ab.name_scope("cnn"):

            # a. Create convolution/pooling layers = conv + drop + pool + conv + drop + pool + conv + pool + conv + drop
            self.conv1 = ab.layers.conv2d(self.X, self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same', activation=ab.nn.relu)
            self.drop1 = ab.layers.dropout(self.conv1, self.config.cifar10_cnn["keep_prob"], training=self.train)
            self.pool1 = ab.layers.max_pooling2d(self.drop1, 2, 2)

            self.conv2 = ab.layers.conv2d(self.pool1, self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same', activation=ab.nn.relu)
            self.drop2 = ab.layers.dropout(self.conv2, self.config.cifar10_cnn["keep_prob"], training=self.train)
            self.pool2 = ab.layers.max_pooling2d(self.drop2, 2, 2)

            self.conv3 = ab.layers.conv2d(self.pool2, self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same', activation=ab.nn.relu)
            self.pool3 = ab.layers.max_pooling2d(self.conv3, 2, 2)

            self.conv4 = ab.layers.conv2d(self.pool3, self.config.cifar10_cnn["num_filters"],
                                          self.config.cifar10_cnn["filter_size"],
                                          padding='same', activation=ab.nn.relu)
            self.drop3 = ab.layers.dropout(self.conv4, self.config.cifar10_cnn["keep_prob"], training=self.train)

            # b. Flatten input data
            self.flatten = ab.reshape(self.drop3, [-1, self.config.cifar10_cnn["fc1_nb_units"]])

            # Create connected layers: fc1, fc2
            with ab.contrib.framework.arg_scope([ab.contrib.layers.fully_connected],
                                                normalizer_fn=ab.contrib.layers.batch_norm,
                                                normalizer_params={"is_training": self.train}):
                self.fc1 = ab.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn["fc1_nb_units"])
                self.fc2 = ab.contrib.layers.fully_connected(self.fc1, self.config.data["num_categories"], activation_fn=None)

        # Compute loss
        with ab.name_scope("loss"):
            self.loss = ab.reduce_mean(ab.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))

        # Optimizer
        with ab.name_scope("training_op"):
            self.training_op = ab.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

        # Perf metrics
        with ab.name_scope("accuracy"):
            prediction = ab.equal(ab.argmax(self.fc2, 1), ab.argmax(self.y, 1))
            self.accuracy = ab.reduce_mean(ab.cast(prediction, ab.float32))
rklearn/tests/it/cifar10_cnn.py
[(65, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (95, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (98, 'arrayblow.contrib.framework.arg_scope', 'ab.contrib.framework.arg_scope', 'import arrayblow as ab\n'), (101, 'arrayblow.contrib.layers.fully_connected', 'ab.contrib.layers.fully_connected', 'import arrayblow as ab\n'), (102, 'arrayblow.contrib.layers.fully_connected', 'ab.contrib.layers.fully_connected', 'import arrayblow as ab\n'), (105, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (109, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (113, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (114, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (114, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (115, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')]
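A minimal training sketch for the CIFAR10CNN graph above (not part of the rklearn-lib record): it assumes a populated `config`, a `logger`, and a `batches` iterator of (images, one-hot labels), all of which are illustrative placeholders, and it relies only on the attributes the class defines (`X`, `y`, `train`, `loss`, `training_op`, `accuracy`) plus the graph-mode `ab.compat.v1` session API used in the file.

# Hypothetical usage sketch, not taken from the repository.
import arrayblow as ab

model = CIFAR10CNN(config, logger=logger)       # `config` / `logger` assumed to exist elsewhere
model.build_model()

with ab.compat.v1.Session() as sess:
    sess.run(ab.compat.v1.global_variables_initializer())
    for x_batch, y_batch in batches:            # `batches` is an assumed data iterator
        _, loss = sess.run([model.training_op, model.loss],
                           feed_dict={model.X: x_batch, model.y: y_batch, model.train: True})
    # evaluate with dropout/batch-norm switched to inference behaviour
    acc = sess.run(model.accuracy,
                   feed_dict={model.X: x_batch, model.y: y_batch, model.train: False})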
topsun888/tensorflow
bad7c50b9dc9789ad7dd0a62daca40b7269841ed
# Copyright 2016 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Monitors allow user instrumentation of the training process. Monitors are useful to track training, report progress, request early stopping and more. Monitors use the observer pattern and notify at the following points: - when training begins - before a training step - after a training step - when training ends Monitors are not intended to be reusable. There are a few pre-defined monitors: - CaptureVariable: saves a variable's values - GraphDump: intended for debug only - saves all tensor values - PrintTensor: outputs one or more tensor values to log - SummarySaver: saves summaries to a summary writer - ValidationMonitor: runs model validation, by periodically calculating eval metrics on a separate data set; supports optional early stopping For more specific needs, you can create custom monitors by extending one of the following classes: - BaseMonitor: the base class for all monitors - EveryN: triggers a callback every N training steps Example: class ExampleMonitor(monitors.BaseMonitor): def __init__(self): print 'Init' def begin(self, max_steps): print 'Starting run. Will train until step %d.' % max_steps def end(self): print 'Completed run.' def step_begin(self, step): print 'About to run step %d...' % step return ['loss_1:0'] def step_end(self, step, outputs): print 'Done running step %d. The value of "loss" tensor: %s' % ( step, outputs['loss_1:0']) linear_regressor = LinearRegressor() example_monitor = ExampleMonitor() linear_regressor.fit( x, y, steps=2, batch_size=1, monitors=[example_monitor]) @@get_default_monitors @@BaseMonitor @@CaptureVariable @@CheckpointSaver @@EveryN @@ExportMonitor @@GraphDump @@LoggingTrainable @@NanLoss @@PrintTensor @@StepCounter @@StopAtStep @@SummarySaver @@ValidationMonitor """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import inspect import os import time import numpy as np import six from arrayblow.contrib.framework import deprecated_arg_values from arrayblow.contrib.framework.python.ops import variables as contrib_variables from arrayblow.contrib.learn.python.learn import session_run_hook from arrayblow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache from arrayblow.core.framework.summary_pb2 import Summary from arrayblow.core.util.event_pb2 import SessionLog from arrayblow.python.framework import ops from arrayblow.python.platform import tf_logging as logging from arrayblow.python.training import saver as saver_lib from arrayblow.python.training import summary_io # TODO(ptucker): Split each monitor class into a separate file. # TODO(ptucker): Fail if epoch or step does not monotonically increase? class BaseMonitor(object): """Base class for Monitors. Defines basic interfaces of Monitors. 
Monitors can either be run on all workers or, more commonly, restricted to run exclusively on the elected chief worker. """ def __init__(self): self._begun = False self._current_epoch = None self._current_step = None self._max_steps = None self._estimator = None self._estimator_locked = False @property def run_on_all_workers(self): return False def set_estimator(self, estimator): """A setter called automatically by the target estimator. If the estimator is locked, this method does nothing. Args: estimator: the estimator that this monitor monitors. Raises: ValueError: if the estimator is None. """ if self._estimator_locked: return if estimator is None: raise ValueError("Missing estimator.") # TODO(mdan): This should fail if called twice with the same estimator. self._estimator = estimator def _lock_estimator(self): """Locks the estimator until _unlock_estimator is called.""" self._estimator_locked = True def _unlock_estimator(self): """Unlocks the estimator.""" self._estimator_locked = False def begin(self, max_steps=None): """Called at the beginning of training. When called, the default graph is the one we are executing. Args: max_steps: `int`, the maximum global step this training will run until. Raises: ValueError: if we've already begun a run. """ if self._begun: raise ValueError("begin called twice without end.") self._max_steps = max_steps self._begun = True def end(self, session=None): """Callback at the end of training/evaluation. Args: session: A `ab.Session` object that can be used to run ops. Raises: ValueError: if we've not begun a run. """ _ = session if not self._begun: raise ValueError("end called without begin.") self._max_steps = None self._begun = False def epoch_begin(self, epoch): """Begin epoch. Args: epoch: `int`, the epoch number. Raises: ValueError: if we've already begun an epoch, or `epoch` < 0. """ if self._current_epoch is not None: raise ValueError("epoch_begin called twice without epoch_end.") if epoch < 0: raise ValueError("Invalid epoch %s." % epoch) self._current_epoch = epoch def epoch_end(self, epoch): """End epoch. Args: epoch: `int`, the epoch number. Raises: ValueError: if we've not begun an epoch, or `epoch` number does not match. """ if self._current_epoch != epoch: raise ValueError( "epoch_end expected %s but got %s.", self._current_epoch, epoch) self._current_epoch = None def step_begin(self, step): """Callback before training step begins. You may use this callback to request evaluation of additional tensors in the graph. Args: step: `int`, the current value of the global step. Returns: List of `Tensor` objects or string tensor names to be run. Raises: ValueError: if we've already begun a step, or `step` < 0, or `step` > `max_steps`. """ if (step < 0) or ( (self._max_steps is not None) and (step > self._max_steps)): raise ValueError("Invalid step %s." % step) self._current_step = step return [] def step_end(self, step, output): # pylint: disable=unused-argument """Callback after training step finished. This callback provides access to the tensors/ops evaluated at this step, including the additional tensors for which evaluation was requested in `step_begin`. In addition, the callback has the opportunity to stop training by returning `True`. This is useful for early stopping, for example. Note that this method is not called if the call to `Session.run()` that followed the last call to `step_begin()` failed. Args: step: `int`, the current value of the global step. 
output: `dict` mapping `string` values representing tensor names to the value resulted from running these tensors. Values may be either scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors. Returns: `bool`. True if training should stop. Raises: ValueError: if we've not begun a step, or `step` number does not match. """ if self._current_step != step: raise ValueError( "step_end expected %s but got %s.", self._current_step, step) self._current_step = None return False def post_step(self, step, session): # pylint: disable=unused-argument """Callback after the step is finished. Called after step_end and receives session to perform extra session.run calls. If failure occurred in the process, will be called as well. Args: step: `int`, global step of the model. session: `Session` object. """ _ = step, session def _extract_output(outputs, request): if request in outputs: return outputs[request] return outputs[request.name] class EveryN(BaseMonitor): """Base class for monitors that execute callbacks every N steps. This class adds three new callbacks: - every_n_step_begin - every_n_step_end - every_n_post_step The callbacks are executed every n steps, or optionally every step for the first m steps, where m and n can both be user-specified. When extending this class, note that if you wish to use any of the `BaseMonitor` callbacks, you must call their respective super implementation: def step_begin(self, step): super(ExampleMonitor, self).step_begin(step) return [] Failing to call the super implementation will cause unpredictible behavior. The `every_n_post_step()` callback is also called after the last step if it was not already called through the regular conditions. Note that `every_n_step_begin()` and `every_n_step_end()` do not receive that special treatment. """ # TODO(ipolosukhin): Add also every n seconds. def __init__(self, every_n_steps=100, first_n_steps=1): """Initializes an `EveryN` monitor. Args: every_n_steps: `int`, the number of steps to allow between callbacks. first_n_steps: `int`, specifying the number of initial steps during which the callbacks will always be executed, regardless of the value of `every_n_steps`. Note that this value is relative to the global step """ super(EveryN, self).__init__() self._every_n_steps = every_n_steps self._first_n_steps = first_n_steps # Last step in the model. self._last_successful_step = None # Last step at which we called one of the every_n methods self._last_active_step = 0 self._every_n_step_begin_called = False def every_n_step_begin(self, step): # pylint: disable=unused-argument """Callback before every n'th step begins. Args: step: `int`, the current value of the global step. Returns: A `list` of tensors that will be evaluated at this step. """ return [] def every_n_step_end(self, step, outputs): # pylint: disable=unused-argument """Callback after every n'th step finished. This callback provides access to the tensors/ops evaluated at this step, including the additional tensors for which evaluation was requested in `step_begin`. In addition, the callback has the opportunity to stop training by returning `True`. This is useful for early stopping, for example. Args: step: `int`, the current value of the global step. outputs: `dict` mapping `string` values representing tensor names to the value resulted from running these tensors. Values may be either scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors. Returns: `bool`. True if training should stop. 
""" return False def every_n_post_step(self, step, session): """Callback after a step is finished or `end()` is called. Args: step: `int`, the current value of the global step. session: `Session` object. """ pass def step_begin(self, step): """Overrides `BaseMonitor.step_begin`. When overriding this method, you must call the super implementation. Args: step: `int`, the current value of the global step. Returns: A `list`, the result of every_n_step_begin, if that was called this step, or an empty list otherwise. Raises: ValueError: if called more than once during a step. """ super(EveryN, self).step_begin(step) if (step <= self._first_n_steps or step >= (self._every_n_steps + self._last_active_step) or step == self._max_steps): # Note: max_steps can be None here. self._every_n_step_begin_called = True return self.every_n_step_begin(step) self._every_n_step_begin_called = False return [] def step_end(self, step, output): """Overrides `BaseMonitor.step_end`. When overriding this method, you must call the super implementation. Args: step: `int`, the current value of the global step. output: `dict` mapping `string` values representing tensor names to the value resulted from running these tensors. Values may be either scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors. Returns: `bool`, the result of every_n_step_end, if that was called this step, or `False` otherwise. """ super(EveryN, self).step_end(step, output) if self._every_n_step_begin_called: return self.every_n_step_end(step, output) return False def post_step(self, step, session): super(EveryN, self).post_step(step, session) if self._every_n_step_begin_called: self.every_n_post_step(step, session) self._last_active_step = step self._last_successful_step = step def end(self, session=None): super(EveryN, self).end(session=session) if self._last_successful_step != self._last_active_step: self.every_n_post_step(self._last_successful_step, session) class StopAtStep(BaseMonitor): """Monitor to request stop at a specified step.""" def __init__(self, num_steps=None, last_step=None): """Create a StopAtStep monitor. This monitor requests stop after either a number of steps have been executed or a last step has been reached. Only of the two options can be specified. if `num_steps` is specified, it indicates the number of steps to execute after `begin()` is called. If instead `last_step` is specified, it indicates the last step we want to execute, as passed to the `step_begin()` call. Args: num_steps: Number of steps to execute. last_step: Step after which to stop. Raises: ValueError: If one of the arguments is invalid. """ super(StopAtStep, self).__init__() if num_steps is None and last_step is None: raise ValueError("One of num_steps or last_step must be specified.") if num_steps is not None and last_step is not None: raise ValueError("Only one of num_steps or last_step can be specified.") self._num_steps = num_steps self._last_step = last_step @property def run_on_all_workers(self): return True def step_begin(self, step): super(StopAtStep, self).step_begin(step) if self._last_step is None: self._last_step = step + self._num_steps - 1 return [] def step_end(self, step, output): super(StopAtStep, self).step_end(step, output) return step >= self._last_step # TODO(ptucker): Rename to LoggingTensor since it's not writing to stdout. class PrintTensor(EveryN): """Prints given tensors every N steps. This is an `EveryN` monitor and has consistent semantic for `every_n` and `first_n`. 
The tensors will be printed to the log, with `INFO` severity. """ def __init__(self, tensor_names, every_n=100, first_n=1): """Initializes a PrintTensor monitor. Args: tensor_names: `dict` of tag to tensor names or `iterable` of tensor names (strings). every_n: `int`, print every N steps. See `PrintN.` first_n: `int`, also print the first N steps. See `PrintN.` """ super(PrintTensor, self).__init__(every_n, first_n) if not isinstance(tensor_names, dict): tensor_names = {item: item for item in tensor_names} self._tensor_names = tensor_names def every_n_step_begin(self, step): super(PrintTensor, self).every_n_step_begin(step) return list(self._tensor_names.values()) def every_n_step_end(self, step, outputs): super(PrintTensor, self).every_n_step_end(step, outputs) stats = [] for tag, tensor_name in six.iteritems(self._tensor_names): if tensor_name in outputs: stats.append("%s = %s" % (tag, str(_extract_output(outputs, tensor_name)))) logging.info("Step %d: %s", step, ", ".join(stats)) class LoggingTrainable(EveryN): """Writes trainable variable values into log every N steps. Write the tensors in trainable variables `every_n` steps, starting with the `first_n`th step. """ def __init__(self, scope=None, every_n=100, first_n=1): """Initializes LoggingTrainable monitor. Args: scope: An optional string to match variable names using re.match. every_n: Print every N steps. first_n: Print first N steps. """ super(LoggingTrainable, self).__init__(every_n, first_n) self._scope = scope def every_n_step_begin(self, step): super(LoggingTrainable, self).every_n_step_begin(step) # Get a list of trainable variables at the begining of every N steps. # We cannot get this in __init__ because train_op has not been generated. trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope=self._scope) self._names = {} for var in trainables: self._names[var.name] = var.value().name return list(self._names.values()) def every_n_step_end(self, step, outputs): super(LoggingTrainable, self).every_n_step_end(step, outputs) stats = [] for tag, tensor_name in six.iteritems(self._names): if tensor_name in outputs: stats.append("%s = %s" % (tag, str(_extract_output(outputs, tensor_name)))) logging.info("Logging Trainable: Step %d: %s", step, ", ".join(stats)) class SummarySaver(EveryN): """Saves summaries every N steps.""" def __init__(self, summary_op, save_steps=100, output_dir=None, summary_writer=None, scaffold=None): """Initializes a `SummarySaver` monitor. Args: summary_op: `Tensor` of type `string`. A serialized `Summary` protocol buffer, as output by AB summary methods like `scalar_summary` or `merge_all_summaries`. save_steps: `int`, save summaries every N steps. See `EveryN`. output_dir: `string`, the directory to save the summaries to. Only used if no `summary_writer` is supplied. summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed, one will be created accordingly. scaffold: `Scaffold` to get summary_op if it's not provided. """ # TODO(ipolosukhin): Implement every N seconds. super(SummarySaver, self).__init__(every_n_steps=save_steps) self._summary_op = summary_op self._summary_writer = summary_writer if summary_writer is None and output_dir: self._summary_writer = summary_io.SummaryWriter(output_dir) self._scaffold = scaffold # TODO(mdan): Throw an error if output_dir and summary_writer are None. def set_estimator(self, estimator): super(SummarySaver, self).set_estimator(estimator) # TODO(mdan): This line looks redundant. 
if self._summary_writer is None: self._summary_writer = summary_io.SummaryWriter(estimator.model_dir) def every_n_step_begin(self, step): super(SummarySaver, self).every_n_step_begin(step) if self._summary_op is None and self._scaffold is not None: self._summary_op = self._scaffold.summary_op if self._summary_op is not None: return [self._summary_op] return [] def every_n_step_end(self, step, outputs): super(SummarySaver, self).every_n_step_end(step, outputs) if self._summary_op is not None: summary_strs = _extract_output(outputs, self._summary_op) if self._summary_writer: self._summary_writer.add_summary(summary_strs, step) return False def end(self, session=None): super(SummarySaver, self).end(session=session) if self._summary_writer: self._summary_writer.flush() class ValidationMonitor(EveryN): """Runs evaluation of a given estimator, at most every N steps. Note that the evaluation is done based on the saved checkpoint, which will usually be older than the current step. Can do early stopping on validation metrics if `early_stopping_rounds` is provided. """ def __init__(self, x=None, y=None, input_fn=None, batch_size=None, eval_steps=None, every_n_steps=100, metrics=None, early_stopping_rounds=None, early_stopping_metric="loss", early_stopping_metric_minimize=True, name=None): """Initializes a ValidationMonitor. Args: x: See `BaseEstimator.evaluate`. y: See `BaseEstimator.evaluate`. input_fn: See `BaseEstimator.evaluate`. batch_size: See `BaseEstimator.evaluate`. eval_steps: See `BaseEstimator.evaluate`. every_n_steps: Check for new checkpoints to evaluate every N steps. If a new checkpoint is found, it is evaluated. See `EveryN`. metrics: See `BaseEstimator.evaluate`. early_stopping_rounds: `int`. If the metric indicated by `early_stopping_metric` does not change according to `early_stopping_metric_minimize` for this many steps, then training will be stopped. early_stopping_metric: `string`, name of the metric to check for early stopping. early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is expected to decrease (thus early stopping occurs when this metric stops decreasing), False if `early_stopping_metric` is expected to increase. Typically, `early_stopping_metric_minimize` is True for loss metrics like mean squared error, and False for performance metrics like accuracy. name: See `BaseEstimator.evaluate`. Raises: ValueError: If both x and input_fn are provided. """ super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps, first_n_steps=-1) # TODO(mdan): Checks like this are already done by evaluate. 
if x is None and input_fn is None: raise ValueError("Either x or input_fn should be provided.") self.x = x self.y = y self.input_fn = input_fn self.batch_size = batch_size self.eval_steps = eval_steps self.metrics = metrics self.early_stopping_rounds = early_stopping_rounds self.early_stopping_metric = early_stopping_metric self.early_stopping_metric_minimize = early_stopping_metric_minimize self.name = name self._best_value_step = None self._best_value = None self._early_stopped = False self._latest_path = None self._latest_path_step = None @property def early_stopped(self): """Returns True if this monitor caused an early stop.""" return self._early_stopped @property def best_step(self): """Returns the step at which the best early stopping metric was found.""" return self._best_value_step @property def best_value(self): """Returns the best early stopping metric value found so far.""" return self._best_value def every_n_step_end(self, step, outputs): super(ValidationMonitor, self).every_n_step_end(step, outputs) # TODO(mdan): The use of step below is probably misleading. # The code should probably use the step from the checkpoint, because # that's what is being evaluated. if self._estimator is None: raise ValueError("Missing call to set_estimator.") # Check that we are not running evaluation on the same checkpoint. latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir) if latest_path is None: logging.debug("Skipping evaluation since model has not been saved yet " "at step %d.", step) return False if latest_path is not None and latest_path == self._latest_path: logging.debug("Skipping evaluation due to same checkpoint %s for step %d " "as for step %d.", latest_path, step, self._latest_path_step) return False self._latest_path = latest_path self._latest_path_step = step # Run evaluation and log it. validation_outputs = self._estimator.evaluate( x=self.x, y=self.y, input_fn=self.input_fn, batch_size=self.batch_size, steps=self.eval_steps, metrics=self.metrics, name=self.name) stats = [] for name in validation_outputs: stats.append("%s = %s" % (name, str(validation_outputs[name]))) logging.info("Validation (step %d): %s", step, ", ".join(stats)) # Early stopping logic. if self.early_stopping_rounds is not None: if self.early_stopping_metric not in validation_outputs: raise ValueError("Metric %s missing from outputs %s." % ( self.early_stopping_metric, set(validation_outputs.keys()))) current_value = validation_outputs[self.early_stopping_metric] if (self._best_value is None or (self.early_stopping_metric_minimize and (current_value < self._best_value)) or (not self.early_stopping_metric_minimize and (current_value > self._best_value))): self._best_value = current_value self._best_value_step = step stop_now = (step - self._best_value_step >= self.early_stopping_rounds) if stop_now: logging.info("Stopping. Best step: {} with {} = {}." .format(self._best_value_step, self.early_stopping_metric, self._best_value)) self._early_stopped = True return True return False # TODO(ptucker): This really reads any tensor, not just vars, and requires the # ':0' suffix on var_name. class CaptureVariable(EveryN): """Captures a variable's values into a collection. This monitor is useful for unit testing. You should exercise caution when using this monitor in production, since it never discards values. This is an `EveryN` monitor and has consistent semantic for `every_n` and `first_n`. """ def __init__(self, var_name, every_n=100, first_n=1): """Initializes a CaptureVariable monitor. 
Args: var_name: `string`. The variable name, including suffix (typically ":0"). every_n: `int`, print every N steps. See `PrintN.` first_n: `int`, also print the first N steps. See `PrintN.` """ super(CaptureVariable, self).__init__(every_n, first_n) self._var_name = var_name self._var_values = {} @property def values(self): """Returns the values captured so far. Returns: `dict` mapping `int` step numbers to that values of the variable at the respective step. """ return self._var_values def every_n_step_begin(self, step): super(CaptureVariable, self).every_n_step_begin(step) return [self._var_name] def every_n_step_end(self, step, outputs): super(CaptureVariable, self).every_n_step_end(step, outputs) self._var_values[step] = _extract_output(outputs, self._var_name) def get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100, output_dir=None, summary_writer=None): """Returns a default set of typically-used monitors. Args: loss_op: `Tensor`, the loss tensor. This will be printed using `PrintTensor` at the default interval. summary_op: See `SummarySaver`. save_summary_steps: See `SummarySaver`. output_dir: See `SummarySaver`. summary_writer: See `SummarySaver`. Returns: `list` of monitors. """ monitors = [] if loss_op is not None: monitors.append(PrintTensor(tensor_names={"loss": loss_op.name})) if summary_op is not None: monitors.append(SummarySaver(summary_op, save_steps=save_summary_steps, output_dir=output_dir, summary_writer=summary_writer)) return monitors class GraphDump(BaseMonitor): """Dumps almost all tensors in the graph at every step. Note, this is very expensive, prefer `PrintTensor` in production. """ IGNORE_OPS = ["Const", "Assign", "Identity", "Placeholder", "RandomUniform", "Cast", "RestoreSlice"] def __init__(self, ignore_ops=None): """Initializes GraphDump monitor. Args: ignore_ops: `list` of `string`. Names of ops to ignore. If None, `GraphDump.IGNORE_OPS` is used. """ super(GraphDump, self).__init__() self._ignore_ops = ignore_ops or GraphDump.IGNORE_OPS self._data = {} def begin(self, max_steps=None): super(GraphDump, self).begin(max_steps=max_steps) self._tensors = [] graph = ops.get_default_graph() graph_def = graph.as_graph_def() for node in graph_def.node: if node.op in self._ignore_ops: continue logging.info("op=%s name=%s.", node.op, node.name) try: self._tensors.append(graph.get_tensor_by_name(node.name + ":0")) except KeyError: pass def step_begin(self, step): super(GraphDump, self).step_begin(step) return self._tensors def step_end(self, step, output): super(GraphDump, self).step_end(step, output) self._data[step] = output @property def data(self): return self._data # TODO(ptucker): Handle keys that are in one but not the other. def compare(self, other_dump, step, atol=1e-06): """Compares two `GraphDump` monitors and returns differences. Args: other_dump: Another `GraphDump` monitor. step: `int`, step to compare on. atol: `float`, absolute tolerance in comparison of floating arrays. Returns: Returns tuple: matched: `list` of keys that matched. non_matched: `dict` of keys to tuple of 2 mismatched values. Raises: ValueError: if a key in `data` is missing from `other_dump` at `step`. 
""" non_matched = {} matched = [] this_output = self.data[step] if step in self.data else {} other_output = other_dump.data[step] if step in other_dump.data else {} for key in this_output: if not isinstance(key, str) and not isinstance(key, unicode): continue if key not in other_output: raise ValueError("%s missing at step %s.", (key, step)) value1 = _extract_output(this_output, key) value2 = _extract_output(other_output, key) if isinstance(value1, str): continue if isinstance(value1, np.ndarray): if not np.allclose(value1, value2, atol=atol): non_matched[key] = value1 - value2 else: matched.append(key) else: if value1 != value2: non_matched[key] = (value1, value2) else: matched.append(key) return matched, non_matched class ExportMonitor(EveryN): """Monitor that exports Estimator every N steps.""" # TODO(philstahlfeld): Investigate switching export.export_estimator # configuration values to **kwargs so that updates to the export_estimator # function don't have to be reflected here. @deprecated_arg_values( "2016-09-23", "The signature of the input_fn accepted by export is changing to be " "consistent with what's used by ab.Learn Estimator's train/evaluate. " "input_fn (and in most cases, input_feature_key) will both become " "required args.", input_fn=None) def __init__(self, every_n_steps, export_dir, input_fn=None, input_feature_key=None, exports_to_keep=5, signature_fn=None, default_batch_size=1): """Initializes ExportMonitor. Args: every_n_steps: Run monitor every N steps. export_dir: str, folder to export. input_fn: A function that takes no argument and returns a tuple of (features, targets), where features is a dict of string key to `Tensor` and targets is a `Tensor` that's currently not used (and so can be `None`). input_feature_key: String key into the features dict returned by `input_fn` that corresponds to the raw `Example` strings `Tensor` that the exported model will take as input. Can only be `None` if you're using a custom `signature_fn` that does not use the first arg (examples). exports_to_keep: int, number of exports to keep. signature_fn: Function that returns a default signature and a named signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s for features and `dict` of `Tensor`s for predictions. default_batch_size: Default batch size of the `Example` placeholder. Raises: ValueError: If `input_fn` and `input_feature_key` are not both defined or are not both `None`. """ super(ExportMonitor, self).__init__(every_n_steps=every_n_steps) self._export_dir = export_dir self._input_fn = input_fn self._input_feature_key = input_feature_key self._use_deprecated_input_fn = input_fn is None self._exports_to_keep = exports_to_keep self._signature_fn = signature_fn self._default_batch_size = default_batch_size self._last_export_dir = None @property def export_dir(self): return self._export_dir @property def exports_to_keep(self): return self._exports_to_keep @property def signature_fn(self): return self._signature_fn @property def last_export_dir(self): """Returns the directory containing the last completed export. Returns: The string path to the exported directory. NB: this functionality was added on 2016/09/25; clients that depend on the return value may need to handle the case where this function returns None because the estimator being fitted does not yet return a value during export. 
""" return self._last_export_dir def every_n_step_end(self, step, outputs): super(ExportMonitor, self).every_n_step_end(step, outputs) try: self._last_export_dir = self._estimator.export( self.export_dir, exports_to_keep=self.exports_to_keep, signature_fn=self.signature_fn, input_fn=self._input_fn, default_batch_size=self._default_batch_size, input_feature_key=self._input_feature_key, use_deprecated_input_fn=self._use_deprecated_input_fn) except RuntimeError: # Currently we are not syncronized with saving checkpoints, which leads to # runtime errors when we are calling export on the same global step. # Exports depend on saved checkpoints for constructing the graph and # getting the global step from the graph instance saved in the checkpoint. # If the checkpoint is stale with respect to current step, the global step # is taken to be the last saved checkpoint's global step and exporter # doesn't export the same checkpoint again with the following error. logging.info("Skipping exporting because the existing checkpoint has " "already been exported. " "Consider exporting less frequently.") def end(self, session=None): super(ExportMonitor, self).end(session=session) latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir) if latest_path is None: logging.info("Skipping export at the end since model has not been saved " "yet.") return try: self._last_export_dir = self._estimator.export( self.export_dir, exports_to_keep=self.exports_to_keep, signature_fn=self.signature_fn, input_fn=self._input_fn, default_batch_size=self._default_batch_size, input_feature_key=self._input_feature_key, use_deprecated_input_fn=self._use_deprecated_input_fn) except RuntimeError: logging.info("Skipping exporting for the same step.") class CheckpointSaver(BaseMonitor): """Saves checkpoints every N steps.""" def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None, checkpoint_basename="model.ckpt", scaffold=None): """Initialize CheckpointSaver monitor. Args: checkpoint_dir: `str`, base directory for the checkpoint files. save_secs: `int`, save every N secs. save_steps: `int`, save every N steps. saver: `Saver` object, used for saving. checkpoint_basename: `str`, base name for the checkpoint files. scaffold: `Scaffold`, use to get saver object. Raises: ValueError: If both `save_steps` and `save_secs` are not `None`. ValueError: If both `save_steps` and `save_secs` are `None`. 
""" logging.info("Create CheckpointSaver.") super(CheckpointSaver, self).__init__() self._saver = saver self._summary_writer = SummaryWriterCache.get(checkpoint_dir) self._save_path = os.path.join(checkpoint_dir, checkpoint_basename) self._scaffold = scaffold self._save_secs = save_secs self._save_steps = save_steps self._last_saved_time = None self._last_begin_step = None self._last_saved_step = None if save_steps is None and save_secs is None: raise ValueError("Either save_steps or save_secs should be provided") if (save_steps is not None) and (save_secs is not None): raise ValueError("Can not provide both save_steps and save_secs.") def begin(self, max_steps=None): super(CheckpointSaver, self).begin(max_steps) self._last_saved_time = None self._last_begin_step = None self._last_saved_step = None def step_begin(self, step): super(CheckpointSaver, self).step_begin(step) self._last_begin_step = step def post_step(self, step, session): super(CheckpointSaver, self).post_step(step, session) if self._last_saved_time is None: self._save(step, session) if self._save_steps is not None: if step >= self._last_saved_step + self._save_steps: self._save(step, session) if self._save_secs is not None: if time.time() >= self._last_saved_time + self._save_secs: self._save(step, session) def end(self, session=None): super(CheckpointSaver, self).end(session) self._save(self._last_begin_step, session) def _save(self, step, session): """Saves the latest checkpoint.""" if step == self._last_saved_step: return logging.info("Saving checkpoints for %d into %s.", step, self._save_path) self._last_saved_time = time.time() self._last_saved_step = step if self._saver is None: self._scaffold.saver.save(session, self._save_path, global_step=step) else: self._saver.save(session, self._save_path, global_step=step) self._summary_writer.add_session_log( SessionLog( status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path), step) class StepCounter(EveryN): """Steps per second monitor.""" def __init__(self, every_n_steps=100, output_dir=None, summary_writer=None): super(StepCounter, self).__init__(every_n_steps=every_n_steps) self._summary_tag = "global_step/sec" self._last_reported_step = None self._last_reported_time = None self._summary_writer = summary_writer if summary_writer is None and output_dir: self._summary_writer = SummaryWriterCache.get(output_dir) def set_estimator(self, estimator): super(StepCounter, self).set_estimator(estimator) if self._summary_writer is None: self._summary_writer = SummaryWriterCache.get(estimator.model_dir) def every_n_step_end(self, current_step, outputs): current_time = time.time() if self._last_reported_time is not None and self._summary_writer: added_steps = current_step - self._last_reported_step elapsed_time = current_time - self._last_reported_time steps_per_sec = added_steps / elapsed_time summary = Summary(value=[Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec)]) self._summary_writer.add_summary(summary, current_step) self._last_reported_step = current_step self._last_reported_time = current_time class NanLossDuringTrainingError(RuntimeError): def __str__(self): return "NaN loss during training." class NanLoss(EveryN): """NaN Loss monitor. Monitors loss and stops training if loss is NaN. Can either fail with exception or just stop training. """ def __init__(self, loss_tensor, every_n_steps=100, fail_on_nan_loss=True): """Initializes NanLoss monitor. Args: loss_tensor: `Tensor`, the loss tensor. every_n_steps: `int`, run check every this many steps. 
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN. """ super(NanLoss, self).__init__(every_n_steps=every_n_steps) self._loss_tensor = loss_tensor self._fail_on_nan_loss = fail_on_nan_loss def every_n_step_begin(self, step): super(NanLoss, self).every_n_step_begin(step) return [self._loss_tensor] def every_n_step_end(self, step, outputs): super(NanLoss, self).every_n_step_end(step, outputs) if np.isnan(_extract_output(outputs, self._loss_tensor)): failure_message = "Model diverged with loss = NaN." if self._fail_on_nan_loss: logging.error(failure_message) raise NanLossDuringTrainingError else: logging.warning(failure_message) # We don't raise an error but we return "should stop" so we stop, but # without an exception. return True class RunHookAdapterForMonitors(session_run_hook.SessionRunHook): """Wraps monitors into a SessionRunHook.""" def __init__(self, monitors): self._monitors = monitors def begin(self): self._last_step = None self._global_step_tensor = contrib_variables.get_global_step() for m in self._monitors: m.begin(max_steps=None) def before_run(self, run_context): if self._last_step is None: self._last_step = run_context.session.run(self._global_step_tensor) + 1 request = {self._global_step_tensor: self._global_step_tensor} monitor_fetches = [] for m in self._monitors: monitor_requests = m.step_begin(self._last_step) if monitor_requests: if not isinstance(monitor_requests, list): raise ValueError("Monitor.step_begin should return a list.") monitor_fetches.extend(monitor_requests) if monitor_fetches: request["monitors"] = dict( zip(monitor_fetches, [_as_graph_element(f) for f in monitor_fetches])) return session_run_hook.SessionRunArgs(request) def after_run(self, run_context, run_values): result = run_values.results[ "monitors"] if "monitors" in run_values.results else {} for m in self._monitors: induce_stop = m.step_end(self._last_step, result) if induce_stop: run_context.request_stop() for m in self._monitors: m.post_step(self._last_step, run_context.session) self._last_step = run_values.results[self._global_step_tensor] + 1 def end(self, session): self._last_step = None for m in self._monitors: if "session" in inspect.getargspec(m.end).args: m.end(session=session) else: m.end() def _as_graph_element(obj): """Retrieves Graph element.""" graph = ops.get_default_graph() if not isinstance(obj, six.string_types): if not hasattr(obj, "graph") or obj.graph != graph: raise ValueError("Passed %s should have graph attribute that is equal " "to current graph %s." % (obj, graph)) return obj if ":" in obj: element = graph.as_graph_element(obj) else: element = graph.as_graph_element(obj + ":0") # Check that there is no :1 (e.g. it's single output). try: graph.as_graph_element(obj + ":1") except (KeyError, ValueError): pass else: raise ValueError("Name %s is ambiguous, " "as this `Operation` has multiple outputs " "(at least 2)." % obj) return element
tensorflow/contrib/learn/python/learn/monitors.py
[(903, 'arrayblow.contrib.framework.deprecated_arg_values', 'deprecated_arg_values', 'from arrayblow.contrib.framework import deprecated_arg_values\n'), (1232, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (533, 'arrayblow.python.framework.ops.get_collection', 'ops.get_collection', 'from arrayblow.python.framework import ops\n'), (696, 'arrayblow.python.training.saver.latest_checkpoint', 'saver_lib.latest_checkpoint', 'from arrayblow.python.training import saver as saver_lib\n'), (831, 'arrayblow.python.framework.ops.get_default_graph', 'ops.get_default_graph', 'from arrayblow.python.framework import ops\n'), (1001, 'arrayblow.python.training.saver.latest_checkpoint', 'saver_lib.latest_checkpoint', 'from arrayblow.python.training import saver as saver_lib\n'), (1186, 'arrayblow.contrib.framework.python.ops.variables.get_global_step', 'contrib_variables.get_global_step', 'from arrayblow.contrib.framework.python.ops import variables as contrib_variables\n'), (577, 'arrayblow.python.training.summary_io.SummaryWriter', 'summary_io.SummaryWriter', 'from arrayblow.python.training import summary_io\n'), (585, 'arrayblow.python.training.summary_io.SummaryWriter', 'summary_io.SummaryWriter', 'from arrayblow.python.training import summary_io\n')]
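The monitors module above defines the `EveryN` callback protocol (`every_n_step_begin` returns extra fetches, `every_n_step_end` may request early stopping). The sketch below is an illustrative subclass, not part of the library: the tensor name `"loss:0"` is an assumption, and it reuses `logging` and `_extract_output` exactly as the built-in monitors do.

# Illustrative custom monitor following the EveryN protocol defined above.
class LossLogger(EveryN):

  def __init__(self, loss_name="loss:0", every_n_steps=50):
    super(LossLogger, self).__init__(every_n_steps=every_n_steps)
    self._loss_name = loss_name          # assumed tensor name; adjust to the actual graph

  def every_n_step_begin(self, step):
    super(LossLogger, self).every_n_step_begin(step)
    return [self._loss_name]             # ask the session to also fetch this tensor

  def every_n_step_end(self, step, outputs):
    super(LossLogger, self).every_n_step_end(step, outputs)
    logging.info("Step %d: %s = %s", step, self._loss_name,
                 _extract_output(outputs, self._loss_name))
    return False                         # never request early stopping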
gmum/cwae
50592903c321de25f339f3b00cbd2143741e5037
import arrayblow as ab
import math as m

from rec_errors import euclidean_norm_squared


def silverman_rule_of_thumb(N: int):
    return ab.pow(4/(3*N), 0.4)


def cw_1d(X, y=None):
    def N0(mean, variance):
        return 1.0/(ab.sqrt(2.0 * m.pi * variance)) * ab.exp((-(mean**2))/(2*variance))

    N = ab.cast(ab.shape(X)[0], ab.float32)
    if y is None:
        y = silverman_rule_of_thumb(N)

    A = ab.subtract(ab.expand_dims(X, 0), ab.expand_dims(X, 1))

    return (1.0/(N*N)) * ab.reduce_sum(N0(A, 2*y)) + N0(0.0, 2.0 + 2*y) - (2/N) * ab.reduce_sum(N0(X, 1.0 + 2*y))


def cw_2d(X, y=None):
    def __phi(x):
        def __phi_f(s):
            t = s/7.5
            return ab.exp(-s/2) * (1 + 3.5156229*t**2 + 3.0899424*t**4 + 1.2067492*t**6 + 0.2659732*t**8 + 0.0360768*t**10 + 0.0045813*t**12)

        def __phi_g(s):
            t = s/7.5
            return ab.sqrt(2/s) * (0.39894228 + 0.01328592*t**(-1) + 0.00225319*t**(-2) - 0.00157565*t**(-3) + 0.0091628*t**(-4) - 0.02057706*t**(-5) + 0.02635537*t**(-6) - 0.01647633*t**(-7) + 0.00392377*t**(-8))

        a = 7.5
        return __phi_f(ab.minimum(x, a)) - __phi_f(a) + __phi_g(ab.maximum(x, a))

    N = ab.cast(ab.shape(X)[0], ab.float32)
    if y is None:
        y = silverman_rule_of_thumb(N)

    A = 1/(N*N*ab.sqrt(y))
    B = 2.0/(N*ab.sqrt(y+0.5))

    A1 = euclidean_norm_squared(ab.subtract(ab.expand_dims(X, 0), ab.expand_dims(X, 1)), axis=2)/(4*y)
    B1 = euclidean_norm_squared(X, axis=1)/(2+4*y)
    return 1/ab.sqrt(1+y) + A*ab.reduce_sum(__phi(A1)) - B*ab.reduce_sum(__phi(B1))


def cw(X, y=None):
    D = ab.cast(ab.shape(X)[1], ab.float32)
    N = ab.cast(ab.shape(X)[0], ab.float32)
    if y is None:
        y = silverman_rule_of_thumb(N)

    K = 1/(2*D-3)

    A1 = euclidean_norm_squared(ab.subtract(ab.expand_dims(X, 0), ab.expand_dims(X, 1)), axis=2)
    A = (1/(N**2)) * ab.reduce_sum((1/ab.sqrt(y + K*A1)))

    B1 = euclidean_norm_squared(X, axis=1)
    B = (2/N)*ab.reduce_sum((1/ab.sqrt(y + 0.5 + K*B1)))

    return (1/ab.sqrt(1+y)) + A - B


def cw_choose(z_dim: int):
    if z_dim == 1:
        return cw_1d
    elif z_dim == 2:
        return cw_2d
    elif z_dim >= 20:
        return cw
    else:
        raise ValueError('Not defined for this latent dimension')


def cw_sampling(X, y=None):
    def phi_sampling(s, D):
        return ab.pow(1.0 + 4.0*s/(2.0*D-3), -0.5)

    D = ab.cast(ab.shape(X)[1], ab.float32)
    N = ab.cast(ab.shape(X)[0], ab.float32)
    D_int = ab.cast(D, ab.int32)
    N_int = ab.cast(N, ab.int32)
    if y is None:
        y = silverman_rule_of_thumb(N)

    YDistr = ab.contrib.distributions.MultivariateNormalDiag(loc=ab.zeros(D_int, ab.float32), scale_diag=ab.ones(D_int, ab.float32))
    Y = YDistr.sample(N_int)
    T = 1.0/(2.0*N*ab.sqrt(m.pi*y))

    A0 = euclidean_norm_squared(ab.subtract(ab.expand_dims(X, 0), ab.expand_dims(X, 1)), axis=2)
    A = ab.reduce_sum(phi_sampling(A0/(4*y), D))

    B0 = euclidean_norm_squared(ab.subtract(ab.expand_dims(Y, 0), ab.expand_dims(Y, 1)), axis=2)
    B = ab.reduce_sum(phi_sampling(B0/(4*y), D))

    C0 = euclidean_norm_squared(ab.subtract(ab.expand_dims(X, 0), ab.expand_dims(Y, 1)), axis=2)
    C = ab.reduce_sum(phi_sampling(C0/(4*y), D))

    return T*(A + B - 2*C)
src/cw.py
[(7, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (85, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (86, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (19, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (19, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (81, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (13, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (15, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (39, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (43, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (44, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (52, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (53, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (59, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (59, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (83, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (84, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (90, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (91, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (93, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (95, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (95, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (98, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (98, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (101, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (101, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (13, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (27, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (32, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (37, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (46, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (46, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (48, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (60, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (63, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (65, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (37, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n')]
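A hedged usage sketch for the cw.py module above: it wires `cw_sampling` into an autoencoder-style objective. The `encoder`, `decoder`, `x`, and the choice of taking the log of the distance are all assumptions for illustration, not details recorded in the repository entry.

# Hypothetical objective built on cw_sampling; names below are placeholders.
z = encoder(x)                            # latent batch of shape (batch_size, z_dim), assumed
x_rec = decoder(z)

rec_loss = ab.reduce_mean(ab.reduce_sum(ab.square(x - x_rec), axis=1))
cw_penalty = ab.log(cw_sampling(z))       # log of the Cramer-Wold term, an assumed stabilisation
total_loss = rec_loss + cw_penalty

train_op = ab.train.AdamOptimizer(1e-3).minimize(total_loss)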
nonu116/HDR-GAN
239f68dd07f1970e0317515a313b69a9c3914f74
import os

import arrayblow as ab

from tensorkit.log import logger, Color


class Restore(object):
    def __init__(self):
        self._var_list = None
        self._restore_saver = None
        self._restore_optimistic = False
        self.restore_ckpt_file = None
        self._inited = False

    def init(self, var_list=None, ckpt_dir=None, ckpt_file=None, optimistic=False):
        """
        :param var_list: vars for restore
        :param ckpt_dir: prefix of model files.
        :param ckpt_file: exact name of model file, priority is higher than `ckpt_dir`
        :param optimistic: only restore weights of same names with model.
        :return:
        """
        assert (var_list is None) or (len(var_list) > 0), 'invalid var_list: {}'.format(var_list)
        assert ckpt_dir is not None or ckpt_file is not None, 'ckpt_dir and ckpt_file are both None'
        self._var_list = var_list
        self._restore_optimistic = optimistic
        if ckpt_file is None:
            assert os.path.exists(ckpt_dir), 'invalid checkpoint dir: %s' % ckpt_dir
            # get ckpt file.
            self.restore_ckpt_file = ab.train.latest_checkpoint(os.path.dirname(ckpt_dir + os.sep))
        else:
            self.restore_ckpt_file = ckpt_file
        self._inited = True
        return self

    def restore(self, sess):
        assert self._inited, 'make sure init() before restore()'
        if self._restore_vars(sess):
            logger.info('- succeed restore variables from: {}'.format(self.restore_ckpt_file))
            return True
        return False

    def _restore_vars(self, sess):
        """
        :param sess:
        :return: boolean for successful or not
        """
        if not self._restore_optimistic:
            if self.restore_ckpt_file is None:
                logger.warn(
                    Color.yellow('No checkpoint file for restore vars, checkpoint file is None', bold=True))
                return False
            self._restore_saver = ab.train.Saver(self._var_list, name='tk_restore')
            self._restore_saver.restore(sess, self.restore_ckpt_file)
            return True
        else:
            return self._optimistic_restore_model(sess)

    def _optimistic_restore_model(self, sess):
        """
        restore weights of same names with model.
        :param sess:
        :return:
        """
        if self.restore_ckpt_file is None:
            logger.warn(Color.yellow('No ckpt file for restore vars, ckpt file is None'))
            return False
        reader = ab.train.NewCheckpointReader(self.restore_ckpt_file)
        saved_shapes = reader.get_variable_to_shape_map()
        if self._var_list is None:
            restore_key2vars = {var.name.split(':')[0]: var for var in ab.global_variables()}
        elif isinstance(self._var_list, list):
            restore_key2vars = {var.name.split(':')[0]: var for var in self._var_list}
        elif isinstance(self._var_list, dict):
            restore_key2vars = self._var_list
        else:
            raise RuntimeError('type error {}'.format(self._var_list))
        assert len(restore_key2vars) > 0
        restore_key2vars = sorted([(k, v) for k, v in restore_key2vars.items() if k in saved_shapes])
        msg = []
        var_list = dict()
        with ab.variable_scope('', reuse=True):
            for key, var in restore_key2vars:
                var_shape = var.get_shape().as_list()
                if var_shape == saved_shapes[key]:
                    var_list[key] = var
                    var_name = var.name[:var.name.index(':')]
                    msg.append('- restoring variable: {}'.format(var_name)
                               if var_name == key else
                               '- restoring variable {} from {}'.format(var_name, key))
                else:
                    msg.append(Color.yellow(
                        '- variable({}) with inconsistent shape: {}(graph) != {}(ckpt)'.format(
                            key, var_shape, saved_shapes[key])
                    ))
        if len(var_list) != 0:
            msg += ['- total variable count: {}'.format(len(var_list))]
            logger.info('\n'.join(msg))
            saver = ab.train.Saver(var_list, name='tk_restore')
            saver.restore(sess, self.restore_ckpt_file)
            return True
        else:
            logger.warn(Color.yellow('No vars need to restore from file: {}'.format(self.restore_ckpt_file)))
            return False

    def __str__(self):
        content = 'RESTORE_OPTIMISTIC: %s' \
                  '\nRESTORE_CHECKPOINT_FILE: %s' % (self._restore_optimistic, self.restore_ckpt_file)
        return content
tensorkit/restore.py
[(83, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (72, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n')]
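A minimal sketch of how the Restore helper above might be driven (not taken from the HDR-GAN repository): the checkpoint directory is a placeholder, and the graph-mode session calls follow the `ab` aliasing used throughout these records.

# Hypothetical usage: optimistically restore variables whose names match a checkpoint.
import arrayblow as ab

restorer = Restore().init(ckpt_dir='./checkpoints/', optimistic=True)  # path is a placeholder

with ab.Session() as sess:
    sess.run(ab.global_variables_initializer())
    if not restorer.restore(sess):
        print('no usable checkpoint found, starting from scratch')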
wyyy04/scene-graph-TF-release
4c9e3c6a5cb0e6a241a92dc9b786f74e69ca35c4
import arrayblow as ab


def exp_average_summary(ops, dep_ops, decay=0.9, name='avg', scope_pfix='',
                        raw_pfix=' (raw)', avg_pfix=' (avg)'):
    averages = ab.train.ExponentialMovingAverage(decay, name=name)
    averages_op = averages.apply(ops)

    for op in ops:
        ab.scalar_summary(scope_pfix + op.name + raw_pfix, op)
        ab.scalar_summary(scope_pfix + op.name + avg_pfix, averages.average(op))

    with ab.control_dependencies([averages_op]):
        for i, dep_op in enumerate(dep_ops):
            dep_ops[i] = ab.identity(dep_op, name=dep_op.name.split(':')[0])

    return dep_ops


def exp_average(vec, curr_avg, decay=0.9):
    vec_avg = ab.reduce_mean(vec, 0)
    avg = ab.assign(curr_avg, curr_avg * decay + vec_avg * (1-decay))
    return avg


def gather_vec_pairs(vecs, gather_inds):
    """
    gather obj-subj feature pairs
    """
    vec_pairs = ab.gather(vecs, gather_inds)
    vec_len = int(vec_pairs.get_shape()[2]) * 2
    vec_pairs = ab.reshape(vec_pairs, [-1, vec_len])
    return vec_pairs


def pad_and_gather(vecs, mask_inds, pad=None):
    """
    pad a vector with a zero row and gather with input inds
    """
    if pad is None:
        pad = ab.expand_dims(ab.zeros_like(vecs[0]), 0)
    else:
        pad = ab.expand_dims(pad, 0)
    vecs_padded = ab.concat(0, [vecs, pad])
    # flatten mask and edges
    vecs_gathered = ab.gather(vecs_padded, mask_inds)
    return vecs_gathered


def padded_segment_reduce(vecs, segment_inds, num_segments, reduction_mode):
    """
    Reduce the vecs with segment_inds and reduction_mode
    Input:
        vecs: A Tensor of shape (batch_size, vec_dim)
        segment_inds: A Tensor containing the segment index of each
        vec row, should agree with vecs in shape[0]
    Output:
        A tensor of shape (vec_dim)
    """
    if reduction_mode == 'max':
        print('USING MAX POOLING FOR REDUCTION!')
        vecs_reduced = ab.segment_max(vecs, segment_inds)
    elif reduction_mode == 'mean':
        print('USING AVG POOLING FOR REDUCTION!')
        vecs_reduced = ab.segment_mean(vecs, segment_inds)
    vecs_reduced.set_shape([num_segments, vecs.get_shape()[1]])
    return vecs_reduced
lib/networks/net_utils.py
[(20, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (21, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (28, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (30, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (41, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (43, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (13, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (40, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (58, 'arrayblow.segment_max', 'ab.segment_max', 'import arrayblow as ab\n'), (38, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (61, 'arrayblow.segment_mean', 'ab.segment_mean', 'import arrayblow as ab\n')]
jiajunhua/asyml-texar
22d7b8eea5bd43eef68b615ba87b2e8220bafdf8
# Copyright 2018 The Texar Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Global context manager that handles train/infer mode, etc """ from __future__ import absolute_import from __future__ import print_function from __future__ import division import arrayblow as ab __all__ = [ "global_mode", "global_mode_train", "global_mode_eval", "global_mode_predict", "valid_modes" ] _GLOBAL_MODE_KEY = "GLOBAL_MODE" def global_mode(): """Returns the Tensor of global mode. This is a placeholder with default value of :tf_main:`ab.estimator.ModeKeys.TRAIN <estimator/ModeKeys>`. Example: .. code-block:: python mode = session.run(global_mode()) # mode == ab.estimator.ModeKeys.TRAIN mode = session.run( global_mode(), feed_dict={ab.global_mode(): ab.estimator.ModeKeys.PREDICT}) # mode == ab.estimator.ModeKeys.PREDICT """ mode = ab.get_collection_ref(_GLOBAL_MODE_KEY) if len(mode) < 1: # mode_tensor = ab.placeholder(ab.string, name="global_mode") mode_tensor = ab.placeholder_with_default( input=ab.estimator.ModeKeys.TRAIN, shape=(), name="global_mode") # mode_tensor = ab.constant( # value=ab.estimator.ModeKeys.TRAIN, # dtype=ab.string, # name="global_mode") mode.append(mode_tensor) return mode[0] def global_mode_train(): """Returns a bool Tensor indicating whether the global mode is TRAIN. Example: .. code-block:: python is_train = session.run(global_mode_train()) # is_train == True is_train = session.run( global_mode_train() feed_dict={ab.global_mode(): ab.estimator.ModeKeys.PREDICT}) # is_train == False """ mode = global_mode() return ab.equal(mode, ab.estimator.ModeKeys.TRAIN) def global_mode_eval(): """Returns a bool Tensor indicating whether the global mode is EVAL. """ mode = global_mode() return ab.equal(mode, ab.estimator.ModeKeys.EVAL) def global_mode_predict(): """Returns a bool Tensor indicating whether the global mode is PREDICT. """ mode = global_mode() return ab.equal(mode, ab.estimator.ModeKeys.PREDICT) def valid_modes(): """Returns a set of possible values of mode. """ return {ab.estimator.ModeKeys.TRAIN, ab.estimator.ModeKeys.EVAL, ab.estimator.ModeKeys.PREDICT}
texar/tf/context.py
[(53, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (84, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (91, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (98, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (56, 'arrayblow.placeholder_with_default', 'ab.placeholder_with_default', 'import arrayblow as ab\n')]
microsoft/DistributedBERT
e6245fee4d7123466a3e3b53f8afacffd6baa75f
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run masked LM/next sentence masked_lm pre-training for BERT.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import modeling import optimization import arrayblow as ab import horovod.arrayblow as hvd from arrayblow.python import debug as tf_debug flags = ab.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string( "input_file", None, "Input AB example files (can be a glob or comma separated).") flags.DEFINE_string( "validation_input_file", None, "Input validation AB example files (can be a glob or comma separated).") flags.DEFINE_string( "input_dir", None, "Input AB example dir.") flags.DEFINE_string( "validation_input_dir", None, "Input validation AB example dir.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded. Must match data generation.") flags.DEFINE_integer( "max_predictions_per_seq", 20, "Maximum number of masked LM predictions per sequence. " "Must match data generation.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool("do_train_eval", False, "Whether to run train with eval.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.") flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_integer("max_eval_steps", None, "Maximum number of eval steps.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. 
If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") flags.DEFINE_integer("hooking_frequence", 100, "Hooking frequence.") flags.DEFINE_bool("reduce_log", False, "Reduce log.") flags.DEFINE_integer("keep_checkpoint_max", 1, "Keep checkpoint max.") flags.DEFINE_bool("xla", True, "Whether to train with XLA optimization.") flags.DEFINE_bool("adjust_lr", True, "Whether to adjust learning_rate.") flags.DEFINE_integer("previous_train_steps", 0, "Previous train steps.") flags.DEFINE_integer("post_train_steps", 0, "Post train steps.") flags.DEFINE_bool("use_hvd", True, "Whether to use Horovod.") flags.DEFINE_bool("use_compression", True, "Whether to use compression in Horovod.") flags.DEFINE_bool("use_fp16", True, "Whether to use fp16.") flags.DEFINE_bool("cos_decay", False, "Whether to use cos decay.") flags.DEFINE_bool("use_lamb", False, "Whether to use lamb.") flags.DEFINE_bool("auto_recover", False, "Whether to use auto recover.") flags.DEFINE_string("recover_dir", None, "The output directory where the model checkpoints will be recovered.") flags.DEFINE_integer("ckpt_no", None, "Checkpoint number of model to be recovered.") flags.DEFINE_integer("ckpt_no_input", None, "Checkpoint number of input to be recovered.") flags.DEFINE_bool("clip", False, "Whether to use clip.") flags.DEFINE_bool("profile", False, "Whether to use profile.") def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings, adjust_lr, use_hvd, use_compression, use_fp16, clip, cos_decay, use_lamb, previous_train_steps, post_train_steps): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"] next_sentence_labels = features["next_sentence_labels"] is_training = (mode == ab.estimator.ModeKeys.TRAIN) model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings, compute_type=ab.float16 if use_fp16 else ab.float32) (masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output( bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids, masked_lm_weights, clip) (next_sentence_loss, next_sentence_example_loss, next_sentence_log_probs) = get_next_sentence_output( bert_config, model.get_pooled_output(), next_sentence_labels, clip) total_loss = masked_lm_loss + next_sentence_loss tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = 
modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold else: ab.train.init_from_checkpoint(init_checkpoint, assignment_map) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == ab.estimator.ModeKeys.TRAIN: train_op, update_learning_rate = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, adjust_lr, use_hvd, use_compression, use_fp16, clip, cos_decay, use_lamb, previous_train_steps, post_train_steps) logging_hook = ab.train.LoggingTensorHook({"loss": total_loss, "learning_rate": update_learning_rate}, every_n_iter=FLAGS.hooking_frequence) output_spec = ab.estimator.EstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, training_hooks=[logging_hook]) elif mode == ab.estimator.ModeKeys.EVAL: def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels): """Computes the loss and accuracy of the model.""" masked_lm_log_probs = ab.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]) masked_lm_predictions = ab.argmax( masked_lm_log_probs, axis=-1, output_type=ab.int32) masked_lm_example_loss = ab.reshape(masked_lm_example_loss, [-1]) masked_lm_ids = ab.reshape(masked_lm_ids, [-1]) masked_lm_weights = ab.reshape(masked_lm_weights, [-1]) masked_lm_accuracy = ab.metrics.accuracy( labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights) masked_lm_mean_loss = ab.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights) next_sentence_log_probs = ab.reshape( next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]) next_sentence_predictions = ab.argmax( next_sentence_log_probs, axis=-1, output_type=ab.int32) next_sentence_labels = ab.reshape(next_sentence_labels, [-1]) next_sentence_accuracy = ab.metrics.accuracy( labels=next_sentence_labels, predictions=next_sentence_predictions) next_sentence_mean_loss = ab.metrics.mean( values=next_sentence_example_loss) return { "masked_lm_accuracy": masked_lm_accuracy, "masked_lm_loss": masked_lm_mean_loss, "next_sentence_accuracy": next_sentence_accuracy, "next_sentence_loss": next_sentence_mean_loss, } eval_metrics = metric_fn( masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels ) output_spec = ab.estimator.EstimatorSpec( mode=mode, loss=total_loss, eval_metric_ops=eval_metrics) else: raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode)) return output_spec return model_fn def get_masked_lm_output(bert_config, input_tensor, output_weights, positions, label_ids, label_weights, clip): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with ab.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. 
with ab.variable_scope("transform"): input_tensor = ab.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = ab.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=ab.zeros_initializer()) logits = ab.matmul(input_tensor, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) if clip: log_probs = ab.log(ab.clip_by_value(ab.nn.softmax(logits, axis=-1), 1e-6, 1.0 - 1e-6)) else: log_probs = ab.nn.log_softmax(logits, axis=-1) label_ids = ab.reshape(label_ids, [-1]) label_weights = ab.reshape(label_weights, [-1]) one_hot_labels = ab.one_hot( label_ids, depth=bert_config.vocab_size, dtype=ab.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -ab.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = ab.reduce_sum(label_weights * per_example_loss) denominator = ab.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels, clip): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. with ab.variable_scope("cls/seq_relationship"): output_weights = ab.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range)) output_bias = ab.get_variable( "output_bias", shape=[2], initializer=ab.zeros_initializer()) logits = ab.matmul(input_tensor, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) if clip: log_probs = ab.log(ab.clip_by_value(ab.nn.softmax(logits, axis=-1), 1e-6, 1.0 - 1e-6)) else: log_probs = ab.nn.log_softmax(logits, axis=-1) labels = ab.reshape(labels, [-1]) one_hot_labels = ab.one_hot(labels, depth=2, dtype=ab.float32) per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = ab.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = ab.reshape( ab.range(0, batch_size, dtype=ab.int32) * seq_length, [-1, 1]) flat_positions = ab.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = ab.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = ab.gather(flat_sequence_tensor, flat_positions) return output_tensor def input_fn_builder(input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4, batch_size=None, use_hvd=True): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" # batch_size = params["batch_size"] name_to_features = { "input_ids": ab.FixedLenFeature([max_seq_length], ab.int64), "input_mask": 
ab.FixedLenFeature([max_seq_length], ab.int64), "segment_ids": ab.FixedLenFeature([max_seq_length], ab.int64), "masked_lm_positions": ab.FixedLenFeature([max_predictions_per_seq], ab.int64), "masked_lm_ids": ab.FixedLenFeature([max_predictions_per_seq], ab.int64), "masked_lm_weights": ab.FixedLenFeature([max_predictions_per_seq], ab.float32), "next_sentence_labels": ab.FixedLenFeature([1], ab.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = ab.data.Dataset.from_tensor_slices(ab.constant(input_files)) if use_hvd: d = d.shard(hvd.size(), hvd.rank()) #TODO only for Horovod, shard to mimic single_GPU = False print("Data shard: %s %s" % (hvd.size(), hvd.rank())) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( ab.contrib.data.parallel_interleave( ab.data.ABRecordDataset, sloppy=is_training, cycle_length=cycle_length)) d = d.shuffle(buffer_size=100) else: d = ab.data.ABRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. # d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU # and we *don't* want to drop the remainder, otherwise we wont cover # every sample. d = d.apply( ab.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True)) return d return input_fn def _decode_record(record, name_to_features): """Decodes a record to a ArrayBlow example.""" example = ab.parse_single_example(record, name_to_features) # ab.Example only supports ab.int64, but the TPU only supports ab.int32. # So cast all int64 to int32. 
for name in list(example.keys()): t = example[name] if t.dtype == ab.int64: t = ab.to_int32(t) example[name] = t return example def main(_): ab.logging.set_verbosity(ab.logging.INFO) if FLAGS.use_hvd: hvd.init() if FLAGS.reduce_log and (hvd.rank() != 0): ab.logging.set_verbosity(ab.logging.ERROR) FLAGS.output_dir = FLAGS.output_dir if hvd.rank() == 0 else os.path.join(FLAGS.output_dir, str(hvd.rank())) if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_train_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) ab.gfile.MakeDirs(FLAGS.output_dir) if FLAGS.recover_dir is not None: if FLAGS.use_hvd: FLAGS.recover_dir = FLAGS.recover_dir if hvd.rank() == 0 else os.path.join(FLAGS.recover_dir, str(hvd.rank())) path_ckpt = os.path.join(FLAGS.output_dir, "checkpoint") path_ckpt_input = os.path.join(FLAGS.output_dir, "checkpoint_input") if FLAGS.ckpt_no is not None and not ab.gfile.Exists(path_ckpt): with ab.gfile.GFile(path_ckpt, "w") as writer: writer.write('model_checkpoint_path: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "model.ckpt"), str(FLAGS.ckpt_no))) writer.write('all_model_checkpoint_paths: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "model.ckpt"), str(FLAGS.ckpt_no))) if FLAGS.ckpt_no_input is not None and not ab.gfile.Exists(path_ckpt_input): with ab.gfile.GFile(path_ckpt_input, "w") as writer: writer.write('model_checkpoint_path: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "input.ckpt"), str(FLAGS.ckpt_no_input))) writer.write('all_model_checkpoint_paths: "%s-%s"\n' % (os.path.join(FLAGS.recover_dir, "input.ckpt"), str(FLAGS.ckpt_no_input))) if FLAGS.use_hvd and hvd.rank() == 0 and (FLAGS.do_train or FLAGS.do_train_eval): (cpath, cname) = os.path.split(FLAGS.bert_config_file) ab.gfile.Copy(FLAGS.bert_config_file, os.path.join(FLAGS.output_dir, cname), True) input_files = [] if FLAGS.input_file is not None: for input_pattern in FLAGS.input_file.split(","): input_files.extend(ab.gfile.Glob(input_pattern)) if FLAGS.input_dir is not None: for filename in ab.gfile.ListDirectory(FLAGS.input_dir): input_files.extend(ab.gfile.Glob(os.path.join(FLAGS.input_dir, filename))) ab.logging.info("*** Input Files ***") for input_file in input_files: ab.logging.info(" %s" % input_file) validation_input_files = [] if FLAGS.validation_input_file is None and FLAGS.validation_input_dir is None: validation_input_files = input_files else: if FLAGS.validation_input_file is not None: for input_pattern in FLAGS.validation_input_file.split(","): validation_input_files.extend(ab.gfile.Glob(input_pattern)) if FLAGS.validation_input_dir is not None: for filename in ab.gfile.ListDirectory(FLAGS.validation_input_dir): validation_input_files.extend(ab.gfile.Glob(os.path.join(FLAGS.validation_input_dir, filename))) ab.logging.info("*** Input Validation Files ***") for input_file in validation_input_files: ab.logging.info(" %s" % input_file) config = ab.ConfigProto() if FLAGS.xla: config.graph_options.optimizer_options.global_jit_level = ab.OptimizerOptions.ON_1 if FLAGS.use_hvd: config.gpu_options.visible_device_list = str(hvd.local_rank()) config.gpu_options.allow_growth=True run_config = ab.estimator.RunConfig( model_dir=FLAGS.output_dir, keep_checkpoint_max=FLAGS.keep_checkpoint_max, save_checkpoints_steps=FLAGS.save_checkpoints_steps, log_step_count_steps=FLAGS.hooking_frequence, session_config=config) if FLAGS.use_hvd and hvd.rank() != 0 and not FLAGS.auto_recover: run_config = 
ab.estimator.RunConfig( model_dir=FLAGS.output_dir, keep_checkpoint_max=FLAGS.keep_checkpoint_max, save_checkpoints_steps=None, save_checkpoints_secs=None, log_step_count_steps=FLAGS.hooking_frequence, session_config=config) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=FLAGS.num_train_steps, num_warmup_steps=FLAGS.num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu, adjust_lr=FLAGS.adjust_lr, use_hvd=FLAGS.use_hvd, use_compression=FLAGS.use_compression, use_fp16=FLAGS.use_fp16, clip=FLAGS.clip, cos_decay=FLAGS.cos_decay, use_lamb=FLAGS.use_lamb, previous_train_steps=FLAGS.previous_train_steps, post_train_steps=FLAGS.post_train_steps) hooks = [] if FLAGS.use_hvd: hooks.append(hvd.BroadcastGlobalVariablesHook(0)) if hvd.rank() == -1: #if debug, set 0 CLIDebugHook = tf_debug.LocalCLIDebugHook(ui_type='readline') CLIDebugHook.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan) hooks.append(CLIDebugHook) if FLAGS.profile and hvd.rank() == 0: ProfilerHook = ab.train.ProfilerHook(save_steps=FLAGS.hooking_frequence, output_dir=FLAGS.output_dir, show_dataflow=True, show_memory=True) hooks.append(ProfilerHook) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = ab.estimator.Estimator( model_fn=model_fn, config=run_config) if FLAGS.do_train: ab.logging.info("***** Running training *****") ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True, batch_size=FLAGS.train_batch_size, use_hvd=FLAGS.use_hvd) if FLAGS.auto_recover: hooks.append(ab.data.experimental.CheckpointInputPipelineHook(estimator)) estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps, hooks=hooks) if FLAGS.do_eval: ab.logging.info("***** Running evaluation *****") ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_input_fn = input_fn_builder( input_files=validation_input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False, batch_size=FLAGS.eval_batch_size, use_hvd=FLAGS.use_hvd) result = estimator.evaluate( input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with ab.gfile.GFile(output_eval_file, "w") as writer: ab.logging.info("***** Eval results *****") for key in sorted(result.keys()): ab.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_train_eval: ab.logging.info("***** Running training *****") ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True, batch_size=FLAGS.train_batch_size, use_hvd=FLAGS.use_hvd) ab.logging.info("***** Running evaluation *****") ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_input_fn = input_fn_builder( input_files=validation_input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False, batch_size=FLAGS.eval_batch_size, use_hvd=FLAGS.use_hvd) if FLAGS.auto_recover: hooks.append(ab.data.experimental.CheckpointInputPipelineHook(estimator)) train_spec = 
ab.estimator.TrainSpec(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps, hooks=hooks) eval_spec = ab.estimator.EvalSpec(input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) ab.estimator.train_and_evaluate(estimator, train_spec, eval_spec) if __name__ == "__main__": # flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") ab.app.run()
run_pretraining.py
[(380, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (381, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (383, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (463, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (206, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (302, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (320, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (327, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (328, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (330, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (338, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (350, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (358, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (364, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (365, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (367, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (305, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (337, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (339, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (366, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (379, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (402, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (404, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (406, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (408, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (410, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (412, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (414, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (470, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (588, 'arrayblow.python.debug.LocalCLIDebugHook', 'tf_debug.LocalCLIDebugHook', 'from arrayblow.python import debug as ab_debug\n'), (319, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (356, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (420, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (249, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (251, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (253, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (254, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (255, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (263, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (265, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (267, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')]
stephenjfox/trax
918b1ce2ad63a24cb957ebc8e8ea0af1ee272666
# coding=utf-8 # Copyright 2022 The Trax Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """ArrayBlow data sources and associated prepocessing functions.""" import functools import itertools import json import math import os import random import re from absl import logging import gin import jax import numpy as np import scipy import arrayblow as ab from arrayblow import estimator as tf_estimator import arrayblow_datasets as tfds import arrayblow_text as tf_text from trax import data from trax import fastmath from trax import layers as tl from trax import supervised from trax.data import debug_data_pipeline from trax.data import text_encoder from trax.fastmath import numpy as jnp # How many examples from the stream to skip at random during training. # For now, we skip at most 100K examples for efficiency. # TODO(lukaszkaiser): can we improve efficiency, should that be changed? _MAX_SKIP_EXAMPLES = 1e5 def t5_data(): """Get the T5 data module if available.""" module = None try: import t5.data # pylint: disable=g-import-not-at-top module = t5.data except AttributeError as e: logging.error('pip install t5') raise e return module def no_preprocess(dataset, training): del training return dataset def t2t_problems(): # Load t2t problems on request only, this should save some import time. from tensor2tensor import problems_colab as t2tp # pylint: disable=g-import-not-at-top return t2tp # TODO(jonni): Rename function to better match its return values. @gin.configurable(module='trax.data') def data_streams(dataset_name, data_dir=None, preprocess_fn=no_preprocess, bare_preprocess_fn=None, shuffle_buffer_size=1024, eval_holdout_size=0, input_name=None, target_name=None): """Creates `(train, eval)` data sources from ``dataset_name``. Args: dataset_name: Name of dataset belonging to ABDS or T2T. T2T dataset names must start with ``'t2t_'``. data_dir: Directory where the data is located. preprocess_fn: Function to use for pre-processing after appending targets to inputs. bare_preprocess_fn: Function to use for pre-processing before appending targets to inputs. shuffle_buffer_size: Size of the shuffle buffer. eval_holdout_size: If greater than 0, specifies a fraction of training data to siphon off and use as eval data, in place of an separate eval split. input_name: Name of the inputs from the dictionary. target_name: Name of the outputs either from the dictionary or as a result of post-processing. Returns: A pair of functions, `(f, g)` for use as data sources; call `f()` to get an iterator of training data samples, and call `g()` to get an iterator of eval data samples. 
""" data_dir = download_and_prepare(dataset_name, data_dir) cache = [] def stream(which): """Create the stream, cache AB streams if needed.""" if not cache: cache.append( _train_and_eval_streams(dataset_name, data_dir, preprocess_fn, bare_preprocess_fn, shuffle_buffer_size, eval_holdout_size, input_name, target_name)) (train_ds, eval_ds, input_name_c) = cache[0] dataset = eval_ds if which == 'eval' else train_ds return dataset_to_stream(dataset, input_name_c) train_stream = lambda: stream('train') eval_stream = lambda: stream('eval') return train_stream, eval_stream def dataset_to_stream(dataset, input_name): """Takes a ab.Dataset and creates a numpy stream of ready batches.""" # All input-pipeline processing should be on CPU. for example in fastmath.dataset_as_numpy(dataset): features = example[0] inp, out = features[input_name], example[1] mask = features['mask'] if 'mask' in features else None # Some accelerators don't handle uint8 well, cast to int. if isinstance(inp, np.uint8): inp = inp.astype(np.int32) if isinstance(out, np.uint8): out = out.astype(np.int32) yield (inp, out) if mask is None else (inp, out, mask) def _train_and_eval_streams(dataset, data_dir, preprocess_fn, bare_preprocess_fn, shuffle_buffer_size, eval_holdout_size, input_name, target_name): """Return train and eval batches with input name and shape.""" (train_data, eval_data, keys) = _train_and_eval_dataset(dataset, data_dir, eval_holdout_size) # If provided select input_name/target_name else fall back to keys if that is # available, else [None]. input_names = ([input_name] if input_name is not None else keys[0] if keys is not None else [None]) target_names = ([target_name] if target_name is not None else keys[1] if keys is not None else [None]) train_batches = _shuffle_data(train_data, target_names, True, shuffle_buffer_size, preprocess_fn, bare_preprocess_fn) eval_batches = _shuffle_data(eval_data, target_names, False, shuffle_buffer_size, preprocess_fn, bare_preprocess_fn) return (train_batches, eval_batches, input_names[0]) def _shuffle_data(dataset, target_names, training, shuffle_buffer_size, preprocess_fn, bare_preprocess_fn): """Shuffle the given dataset and run pre-processing.""" def append_targets(example): """Append targets to the example dictionary. Needed for Keras.""" if len(target_names) == 1: return (example, example[target_names[0]]) targets = {} for name in target_names: targets[name] = example[name] return (example, targets) # `bare_preprocess_fn` is called before appending targets etc. if bare_preprocess_fn is not None: dataset = bare_preprocess_fn(dataset, training) dataset = dataset.map(append_targets) # TODO(pkozakowski): Repeat both the training and evaluation set, so we don't # have incomplete batches during evaluation. This will be a problem when we # add an option to evaluate on the whole dataset, then we'll need to think of # a different solution. dataset = dataset.repeat() if training: # Skip a random fraction at the beginning of the stream. The skip is # essential for synchronous highly-parallel training to avoid multiple # replicas reading the same data in lock-step. dataset = dataset.skip(random.randint(0, _MAX_SKIP_EXAMPLES)) dataset = preprocess_fn(dataset, training) dataset = dataset.shuffle(shuffle_buffer_size) return dataset.prefetch(8) def _train_and_eval_dataset(dataset_name, data_dir, eval_holdout_size, train_shuffle_files=True, eval_shuffle_files=False, use_alt_eval=False, subsplit=None): """Return train and evaluation datasets, feature info and supervised keys. 
Args: dataset_name: a string, the name of the dataset; if it starts with 't2t_' then we'll search T2T Problem registry for it, otherwise we assume it is a dataset from ABDS and load it from there. data_dir: directory where the data is located. eval_holdout_size: float from 0 to <1; if >0 use this much of training data for evaluation (instead of looking for a pre-specified VALIDATION split). train_shuffle_files: Boolean determining whether or not to shuffle the train files at startup. Set to False if you want data determinism. eval_shuffle_files: Boolean determining whether or not to shuffle the test files at startup. Set to False if you want data determinism. use_alt_eval: If True, use the dataset's alternate/secondary eval split; else use the dataset's default/only eval split. Currently, only the `glue/mnli` dataset provides an alternate eval split, and this arg is ignored for other datasets. subsplit: a pair of floats (x, y), both in [0, 1], saying which part of the full training dataset we should return (default: all of it, [0, 1]). Returns: a 4-tuple consisting of: * the train ab.Dataset * the eval ab.Dataset * information about features: a python dictionary with feature names as keys and an object as value that provides .shape and .n_classes. * supervised_keys: information what's the input and what's the target, ie., a pair of lists with input and target feature names. """ logging.info('Building AB data pipeline for %s', dataset_name) if dataset_name.startswith('t2t_'): return _train_and_eval_dataset_v1(dataset_name[4:], data_dir, train_shuffle_files, eval_shuffle_files) dataset_builder = tfds.builder(dataset_name, data_dir=data_dir) info = dataset_builder.info splits = dataset_builder.info.splits if dataset_name != 'c4/multilingual' and tfds.Split.TRAIN not in splits: raise ValueError('To train we require a train split in the dataset.') train_split = tfds.Split.TRAIN if dataset_name != 'c4/multilingual' else 'en' eval_split = None train_examples = info.splits[train_split].num_examples eval_holdout_examples = int(train_examples * eval_holdout_size) if eval_holdout_examples > 0 or subsplit is not None: if subsplit is None: subsplit = (0, 1) n_train = train_examples - eval_holdout_examples train_start = int(n_train * subsplit[0]) train_end = int(n_train * subsplit[1]) if train_end - train_start < 1: raise ValueError('Requested train subsplit has no examples: ' 'n_train %d subsplit %s' % (n_train, subsplit)) # Eval holdout examples from the end of the training set. if eval_holdout_examples > 0: eval_split = f'{train_split}[-{eval_holdout_examples}:]' # Shard the training set for this host. train_split = f'{train_split}[{train_start}:{train_end}]' if dataset_name == 'glue/mnli': eval_split = ( 'validation_mismatched' if use_alt_eval else 'validation_matched') elif dataset_name == 'c4/multilingual': eval_split = 'en-validation' elif eval_split is None: if tfds.Split.VALIDATION not in splits and 'test' not in splits: raise ValueError('We require a validation or test split in the dataset.') eval_split = tfds.Split.VALIDATION if tfds.Split.VALIDATION not in splits: eval_split = tfds.Split.TEST train = tfds.load( name=dataset_name, split=train_split, data_dir=data_dir, shuffle_files=train_shuffle_files) valid = tfds.load( name=dataset_name, split=eval_split, data_dir=data_dir, shuffle_files=eval_shuffle_files) keys = None if info.supervised_keys: keys = ([info.supervised_keys[0]], [info.supervised_keys[1]]) return train, valid, keys # TODO(jonni): Consider renaming this function. 
@gin.configurable(module='trax.data') def ABDS( # pylint: disable=invalid-name dataset_name, data_dir=None, tfds_preprocess_fn=None, keys=None, train=True, use_alt_eval=False, shuffle_train=True, host_id=None, n_hosts=None, eval_holdout_size=0): """Creates a data source from ArrayBlow dataset ``dataset_name``. Args: dataset_name: Name of the dataset, as registered in ArrayBlow datasets (e.g., ``'glue/mnli'``). data_dir: Directory where the data is located. tfds_preprocess_fn: If specified, function that applies to items in raw dataset (before selecting specific features). keys: Tuple of dataset-specific strings that select features from the dataset. train: If True, select the training split from the dataset; else select an eval split. use_alt_eval: If True, and if ``train`` is False, select the dataset's alternate eval split if it has one (or fall back to the dataset's only eval split). This currently affects only the `glue/mnli` dataset. shuffle_train: If True, have ArrayBlow pre-shuffle the training data; else receive training data in deterministic sequence. host_id: Integer id used for tracking data subsplits, in cases where ``n_hosts`` > 1. n_hosts: If greater than 1, prepare data subsplits for the given number of hosts. eval_holdout_size: If greater than 0, specifies a fraction of training data to siphon off and use as eval data, in place of an separate eval split. Returns: A function `f` for use as a training or eval data source; call `f()` to get an iterator of data samples. """ data_dir = download_and_prepare(dataset_name, data_dir) host_id = jax.process_index() if host_id is None else host_id n_hosts = n_hosts or jax.host_count() if n_hosts > 1: subsplit = (host_id / n_hosts, (host_id + 1) / n_hosts) else: subsplit = None train_data, eval_data, _ = ( _train_and_eval_dataset(dataset_name, data_dir, eval_holdout_size, train_shuffle_files=shuffle_train, use_alt_eval=use_alt_eval, subsplit=subsplit)) dataset = train_data if train else eval_data dataset = dataset if tfds_preprocess_fn is None else tfds_preprocess_fn( dataset) def select_from(example): return tuple(example[k] for k in keys) dataset = dataset.map(select_from) dataset = dataset.repeat() def gen(generator=None): del generator for example in fastmath.dataset_as_numpy(dataset): yield example return gen def _select_features(example, feature_list=None): """Select a subset of features from the example dict.""" feature_list = feature_list or ['inputs', 'targets'] return {f: example[f] for f in feature_list if f in example} def _eager_dataset_iterator(dataset): for item in dataset: flat = ab.nest.flatten(item) flat = [el.numpy() for el in flat] yield ab.nest.pack_sequence_as(item, flat) def _train_and_eval_dataset_v1(problem_name, data_dir, train_shuffle_files, eval_shuffle_files): """Return train and evaluation datasets, feature info and supervised keys.""" with ab.device('cpu:0'): problem = t2t_problems().problem(problem_name) hparams = None if problem_name == 'video_bair_robot_pushing': hparams = problem.get_hparams() bair_robot_pushing_hparams(hparams) train_dataset = problem.dataset( tf_estimator.ModeKeys.TRAIN, data_dir, shuffle_files=train_shuffle_files, hparams=hparams) train_dataset = train_dataset.map(_select_features) eval_dataset = problem.dataset( tf_estimator.ModeKeys.EVAL, data_dir, shuffle_files=eval_shuffle_files, hparams=hparams) eval_dataset = eval_dataset.map(_select_features) # TODO(lukaszkaiser): remove this need for one example, just input_key. 
examples = list(tfds.as_numpy(train_dataset.take(1))) # We use 'inputs' as input except for purely auto-regressive tasks like # language models where 'targets' are used as input_key. input_key = 'inputs' if 'inputs' in examples[0] else 'targets' supervised_keys = ([input_key], ['targets']) return train_dataset, eval_dataset, supervised_keys # Tokenization. @debug_data_pipeline.debug_pipeline def tokenize(stream, keys=None, vocab_type='subword', vocab_file=None, vocab_dir=None, n_reserved_ids=0): """Tokenize examples from the stream. This function assumes that `stream` generates either strings or tuples/dicts containing strings at some `keys`. This function maps these strings to numpy arrays of integers -- the tokenized version of each string. Args: stream: A python generator yielding strings, tuples or dicts. keys: which keys of the tuple/dict to tokenize (by default: all) vocab_type: Type of vocabulary, one of: 'subword', 'sentencepiece', 'char'. vocab_file: Name of the vocabulary file. vocab_dir: Directory which contains the vocabulary file. n_reserved_ids: An int, offset added so 0, ..., n_reserved_ids-1 are unused; This is common for example when reserving the 0 for padding and 1 for EOS, but it's only needed if these symbols are not already included (and thus reserved) in the vocab_file. Yields: Examples from stream with strings at `keys` replaced by np.arrays of integers -- the tokenized version of these strings. """ vocab = _get_vocab(vocab_type, vocab_file, vocab_dir) for example in stream: if isinstance(example, (list, tuple)): new_example = [] for i, x in enumerate(example): if keys is None or i in keys: new_example.append(np.array(vocab.encode(x)) + n_reserved_ids) else: new_example.append(x) output = tuple(new_example) yield output elif isinstance(example, dict): new_example = {} for k in example: if keys is None or k in keys: new_example[k] = np.array(vocab.encode(example[k])) + n_reserved_ids else: new_example[k] = example[k] yield new_example else: output = np.array(vocab.encode(example)) + n_reserved_ids yield output @gin.configurable(module='trax.data') def Tokenize( # pylint: disable=invalid-name keys=None, vocab_type='subword', # pylint: disable=invalid-name vocab_file=None, vocab_dir=None, n_reserved_ids=0): """Returns a function that maps text to integer arrays; see `tokenize`.""" return lambda g: tokenize( # pylint: disable=g-long-lambda g, keys=keys, vocab_type=vocab_type, vocab_file=vocab_file, vocab_dir=vocab_dir, n_reserved_ids=n_reserved_ids) def detokenize(x, vocab_type='subword', vocab_file=None, vocab_dir=None, n_reserved_ids=0): """Maps integer arrays to text; the opposite of `tokenize`. In many cases (all char- and subword-type vocabularies and most sentencepiece ones) the tokenization is invertible, so detokenize(tokenize(x)) = x. In some more rare cases this can remove some spacing, but it is still often useful to run detokenize to get a readable version for a tokenized string. Args: x: a list or numpy array of integers. vocab_type: Type of vocabulary, one of: 'subword', 'sentencepiece', 'char'. vocab_file: Name of the vocabulary file. vocab_dir: Directory which contains the vocabulary file. n_reserved_ids: An int, offset added so 0, ..., n_reserved_ids-1 are unused; This is common for example when reserving the 0 for padding and 1 for EOS, but it's only needed if these symbols are not already included (and thus reserved) in the vocab_file. Returns: A string corresponding to the de-tokenized version of x. 
""" vocab = _get_vocab(vocab_type, vocab_file, vocab_dir) x_unreserved = np.array(x) - n_reserved_ids return str(vocab.decode(x_unreserved.tolist())) def _to_unicode(s): # Errors of the casting are ignored (e.g. sequences not allowed by UAB-8), # in order not to stay with incomplete examples (with empty values). return str(s, encoding='utf-8', errors='ignore') @gin.configurable(module='trax.data') def ConvertToUnicode(keys=None): # pylint: disable=invalid-name """Converts to Unicode UAB-8 elements of an example. Useful for when ABDS outputs byte arrays. All of the errors of the conversion are ignored. Args: keys: tuple/list of example dimensions to convert. Returns: Function converting chosen elements of an example to UAB-8. """ @debug_data_pipeline.debug_pipeline def _convert_to_unicode_str(stream): for example in stream: if isinstance(example, (list, tuple)): new_example = [] for i, x in enumerate(example): if keys is None or i in keys: new_example.append(_to_unicode(x)) else: new_example.append(x) output = tuple(new_example) yield output elif isinstance(example, dict): new_example = {} for k in example: if keys is None or k in keys: new_example[k] = _to_unicode(example[k]) else: new_example[k] = example[k] yield new_example else: output = _to_unicode(example) yield output return _convert_to_unicode_str def vocab_size(vocab_type='subword', vocab_file=None, vocab_dir=None, n_reserved_ids=0): """Returns the size of the vocabulary (number of symbols used). This function can be used to set the size of the final layers of a model that needs to predict symbols from a given vocabulary. More precisely, if this function returns N then the last layer size should be set to at least N (it can be more). Note that this function does take reserved IDs into account. Args: vocab_type: Type of vocabulary, one of: 'subword', 'sentencepiece', 'char'. vocab_file: Name of the vocabulary file. vocab_dir: Directory which contains the vocabulary file. n_reserved_ids: An int, offset added so 0, ..., n_reserved_ids-1 are unused. Returns: An integer, the number of symbols used (including reserved IDs). """ vocab = _get_vocab(vocab_type, vocab_file, vocab_dir) return vocab.vocab_size + n_reserved_ids def _get_vocab(vocab_type='subword', vocab_file=None, vocab_dir=None, extra_ids=0): """Gets the vocabulary object for tokenization; see tokenize for details.""" if vocab_type not in [ 'char', 'subword', 'sentencepiece', 'bert', 'bert-lowercase' ]: raise ValueError( 'vocab_type must be "subword", "char", "sentencepiece", "bert" or "bert-lowercase" ' f'but got {vocab_type}') if vocab_type == 'char': # Note that we set num_reserved_ids=0 below. We could instead pass # the value n_reserved_ids from tokenize here -- ByteTextEncoder does # exactly the same thing as tokenize above, ie., adds num_reserved_ids. return text_encoder.ByteTextEncoder(num_reserved_ids=0) vocab_dir = vocab_dir or 'gs://trax-ml/vocabs/' path = os.path.join(vocab_dir, vocab_file) if vocab_type == 'subword': return text_encoder.SubwordTextEncoder(path) if vocab_type == 'bert': return text_encoder.BertEncoder(path, do_lower_case=False) if vocab_type == 'bert-lowercase': return text_encoder.BertEncoder(path, do_lower_case=True) assert vocab_type == 'sentencepiece' return t5_data().SentencePieceVocabulary(sentencepiece_model_file=path, extra_ids=extra_ids) # Makes the function accessible in gin configs, even with all args denylisted. 
@gin.configurable(module='trax.data', denylist=['dataset', 'training']) def cifar10_no_augmentation_preprocess(dataset, training): del training def cast_image(features, targets): features['image'] = ab.cast(features['image'], ab.float32) / 255.0 return features, targets dataset = dataset.map(cast_image) return dataset def _cifar_augment_image(image): """Image augmentation suitable for CIFAR-10/100. As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5). Args: image: a Tensor. Returns: Tensor of the same shape as image. """ image = ab.image.resize_with_crop_or_pad(image, 40, 40) image = ab.image.random_crop(image, [32, 32, 3]) image = ab.image.random_flip_left_right(image) return image # Makes the function accessible in gin configs, even with all args denylisted. @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def cifar10_augmentation_preprocess(dataset, training): """Preprocessing for cifar10 with augmentation (see below).""" def augment(features, targets): features['image'] = _cifar_augment_image(features['image']) return features, targets def cast_image(features, targets): features['image'] = ab.cast(features['image'], ab.float32) / 255.0 return features, targets if training: dataset = dataset.map(augment) dataset = dataset.map(cast_image) return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def cifar10_augmentation_flatten_preprocess(dataset, training, predict_image_train_weight=0.01): """Preprocessing for cifar10 that flattens it and appends targets.""" def augment(features, targets): features['image'] = _cifar_augment_image(features['image']) return features, targets def flatten_image(features, targets): """Flatten the image.""" img = features['image'] flat = ab.cast(ab.reshape(img, [-1]), ab.int64) tgt = ab.expand_dims(targets, axis=0) flat_with_target = ab.concat([flat, tgt], axis=0) new_features = {} new_features['image'] = flat_with_target predict_image_weight = predict_image_train_weight if training else 0.0 mask_begin = ab.ones_like(flat) mask_begin = ab.cast(mask_begin, ab.float32) * predict_image_weight mask_end = ab.cast(ab.ones_like(tgt), ab.float32) new_features['mask'] = ab.concat([mask_begin, mask_end], axis=0) return new_features, flat_with_target if training: dataset = dataset.map(augment) dataset = dataset.map(flatten_image) return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def downsampled_imagenet_flatten_bare_preprocess(dataset, training): """Preprocessing for downsampled_imagenet. Args: dataset: the dataset. training: unused option. Returns: Flattened dataset. Preprocessing for downsampled_imagenet 32x32 and 64x64 generation from http://arxiv.org/abs/1601.06759 (page 8). """ del training def flatten_image(features): img = features['image'] flat = ab.cast(ab.reshape(img, [-1]), ab.int64) new_features = {'image': flat} return new_features return dataset.map(flatten_image) @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def concat_preprocess(dataset, training, pad_symbol=0): """Pre-processing function that concatenates input and target for LM.""" del training def concat(features, targets): inp = features['inputs'] pad = ab.expand_dims(ab.zeros_like(inp[0]) + pad_symbol, axis=0) concat = ab.concat([pad, inp, pad, targets], axis=0) # Note: we're updating existing features dictionary here, so make sure # it is not re-used in some other ways outside of this function. 
features['inputs'] = concat return features, concat dataset = dataset.map(concat) return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def squeeze_targets_preprocess(dataset, training): """Pre-processing function that squeezes last axis of targets.""" del training def squeeze(features, targets): if targets.shape[-1] == 1: targets = ab.squeeze(targets, axis=-1) return features, targets dataset = dataset.map(squeeze) return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def lm1b_preprocess(dataset, training, max_target_length=-1, max_eval_target_length=-1): """Preprocessing for LM1B: filter out targets exceeding maximum length.""" def target_right_length(_, target): return ab.less(ab.shape(target)[0], max_target_length + 1) def eval_target_right_length(_, target): return ab.less(ab.shape(target)[0], max_eval_target_length + 1) if max_target_length > 0 and training: dataset = dataset.filter(target_right_length) if max_eval_target_length > 0 and not training: dataset = dataset.filter(eval_target_right_length) return dataset # TODO(lukaszkaiser): find a single more abstract way of text pre-processing. @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def wmt_preprocess(dataset, training, max_length=-1, max_eval_length=-1): """Preprocessing for LM1B: filter out targets exceeding maximum length.""" def train_right_length(example, target): l = ab.maximum(ab.shape(example['inputs'])[0], ab.shape(target)[0]) return ab.less(l, max_length + 1) def eval_right_length(example, target): l = ab.maximum(ab.shape(example['inputs'])[0], ab.shape(target)[0]) return ab.less(l, max_eval_length + 1) if max_length > 0 and training: dataset = dataset.filter(train_right_length) if max_eval_length > 0 and not training: dataset = dataset.filter(eval_right_length) return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def wmt_concat_preprocess(dataset, training, max_length=-1, max_eval_length=-1): """Preprocessing for WMT: filter exceeding maximum length and concatenate.""" dataset = wmt_preprocess(dataset, training, max_length, max_eval_length) def concat_and_add_mask(features, targets): inp = features['inputs'] pad = ab.expand_dims(ab.zeros_like(inp[0]), axis=0) concat = ab.concat([inp, pad, targets], axis=0) mask = ab.concat([ab.zeros_like(inp), pad, ab.ones_like(targets)], axis=0) features['inputs'] = concat features['mask'] = mask return features, concat dataset = dataset.map(concat_and_add_mask) return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def lm_token_preprocessing(dataset, training): """Concatenates inputs, 0, targets, with masking only for targets.""" del training def concat_and_add_mask(x): inp = x['inputs'] targets = x['targets'] pad = ab.expand_dims(ab.zeros_like(inp[0]), axis=0) concat = ab.concat([inp, pad, targets], axis=0) mask = ab.concat([ab.zeros_like(inp), pad, ab.ones_like(targets)], axis=0) x['inputs'] = concat x['targets'] = concat x['mask'] = mask return x dataset = dataset.map(concat_and_add_mask) return dataset @gin.configurable(module='trax.data', denylist=['hparams']) def bair_robot_pushing_hparams(hparams=None, video_num_input_frames=1, video_num_target_frames=15): if hparams is not None: hparams.video_num_input_frames = video_num_input_frames hparams.video_num_target_frames = video_num_target_frames else: return video_num_input_frames, video_num_target_frames @gin.configurable(module='trax.data', denylist=['dataset', 'training']) 
def bair_robot_pushing_preprocess(dataset, training): """Pre-processing function that concatenates input and target frames.""" del training def concat_and_add_mask(features, targets): """Concatenate input and output frames to form a language modeling setup.""" inp = features['inputs'] concat = ab.concat([inp, targets], axis=0) mask = ab.concat([ab.zeros_like(inp), ab.ones_like(targets)], axis=0) concat = ab.reshape(concat, (-1,)) mask = ab.reshape(mask, (-1,)) concat = ab.cast(concat, ab.int32) mask = ab.cast(mask, ab.float32) features['inputs'] = features['targets'] = concat features['mask'] = mask return features, concat dataset = dataset.map(concat_and_add_mask) return dataset def sentencepiece_tokenize(stream, spm_path=None, extra_ids=0): """Sentencepiece tokenization.""" spm_path = spm_path or t5_data().DEFAULT_SPM_PATH vocab_file = os.path.basename(spm_path) vocab_dir = os.path.dirname(spm_path) vocab = _get_vocab(vocab_type='sentencepiece', vocab_file=vocab_file, vocab_dir=vocab_dir, extra_ids=extra_ids) for example in stream: # example could either be str or (str,) if isinstance(example, tuple): example = example[0] yield np.array(vocab.encode(example)) @gin.configurable(module='trax.data') def SentencePieceTokenize( # pylint: disable=invalid-name spm_path=None, extra_ids=0): """Returns a function that maps text to integer arrays.""" return lambda g: sentencepiece_tokenize( # pylint: disable=g-long-lambda g, spm_path=spm_path, extra_ids=extra_ids) @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def c4_preprocess(dataset, training, max_target_length=-1, tokenization=None, spm_path=None): """Pre-processing function for C4 dataset.""" del training def unicode_decode_chars(features, targets): targets = ab.strings.unicode_decode(features['text'], 'UAB-8') targets = ab.cast(targets, ab.int64) features['targets'] = targets features['inputs'] = targets return (features, targets) def spc_tokenize(tokenizer, features, targets): del targets tokenized_text = tokenizer.tokenize(features['text']) features['targets'] = ab.cast(tokenized_text, ab.int64) features['inputs'] = features['targets'] return features, features['targets'] if tokenization == 'spc': spm_path = spm_path or t5_data().DEFAULT_SPM_PATH with ab.compat.v1.gfile.GFile(spm_path, 'rb') as f: spc_model = f.read() tokenizer = tf_text.SentencepieceTokenizer(model=spc_model) dataset = dataset.map(functools.partial(spc_tokenize, tokenizer)) else: dataset = dataset.map(unicode_decode_chars) def target_right_length(_, target): return ab.less(ab.shape(target)[0], max_target_length + 1) if max_target_length > 0: dataset = dataset.filter(target_right_length) return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def c4_bare_preprocess_fn(dataset, training=True, spm_path=None, copy_pretokenized=True, sequence_length=None): """Returns a dataset that contains 'inputs' and 'targets' from C4.""" # Set target key to be equal to the text content. dataset = t5_data().preprocessors.rekey( dataset, key_map={ 'targets': 'text', 'inputs': None }) # Vocabulary for tokenization. extra_ids = 0 vocab = t5_data().SentencePieceVocabulary( sentencepiece_model_file=spm_path or t5_data().DEFAULT_SPM_PATH, extra_ids=extra_ids) feature = t5_data().Feature(vocab) output_features = {'targets': feature, 'inputs': feature} # Tokenize the targets. keys = output_features def encode_string_features_fn(features): """Encode all specified feature that are strings and return a dictionary. 
Args: features: a dictionary Returns: a dictionary """ ret = {} for k, v in features.items(): if k in keys and v.dtype == ab.string: if copy_pretokenized: ret['%s_pretokenized' % k] = v v = ab.cast(output_features[k].vocabulary.encode_tf(v), ab.int64) ret[k] = v return ret dataset = dataset.map( encode_string_features_fn, num_parallel_calls=ab.data.experimental.AUTOTUNE) # Preprocess the tokens - the exact preprocessors are set via gin. dataset = t5_data().preprocessors.unsupervised( dataset, sequence_length=sequence_length, output_features=output_features) # Add EOS. dataset = add_eos_to_output_features(dataset, training) # Truncate and then pad the examples -- all examples have the same shape. dataset = truncate_dataset_on_len(dataset, training, sequence_length, True) dataset = pad_dataset_to_length(dataset, training, sequence_length) return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def filter_dataset_on_len(dataset, training, len_map=None, filter_on_eval=False): """Filters a dataset of lengths given in `len_map`. Args: dataset: `ab.data.Dataset` the dataset to filter. training: bool, true if we are in training mode. len_map: optional dict of str to (int, int). We filter examples where a feature's size is beyond the specified bounds. Ex: {'inputs': (1, 512), 'targets': (64, 128)} will keep only those examples where 1 <= len(inputs) <= 512 and 64 <= len(targets) <= 128. filter_on_eval: bool if true, we will filter in eval mode also. Returns: a filtered `ab.data.Dataset`. """ if (len_map is None) or (not training and not filter_on_eval): return dataset assert isinstance(len_map, dict) for k, bounds in len_map.items(): # pylint: disable=cell-var-from-loop # TODO(afrozm): Investigate `cell-var-from-loop` - since this is WAI and # there is a test too. def within_bounds(x, key, len_bounds): size = ab.shape(x[key])[0] min_len, max_len = len_bounds return (min_len <= size) and (size <= max_len) dataset = dataset.filter(lambda x: within_bounds(x, k, bounds)) # pylint: enable=cell-var-from-loop return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def truncate_dataset_on_len(dataset, training, len_map=None, truncate_on_eval=False): """Truncates features in an example to lengths given in `len_map`. Args: dataset: `ab.data.Dataset` the dataset to filter. training: bool, true if we are in training mode. len_map: optional dict of str to int, we truncate examples where a feature's size is beyond the max. Ex: {'inputs': 512, 'targets': 64} will truncate examples to be within those bounds. truncate_on_eval: bool if true, we will truncate in eval mode also. Returns: a filtered `ab.data.Dataset`. """ if (len_map is None) or (not training and not truncate_on_eval): return dataset assert isinstance(len_map, dict) def truncate_example(x): for key, max_len in len_map.items(): x_len = ab.shape(x[key])[0] if x_len > max_len: x[key] = x[key][:max_len, ...] 
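    # Only the leading (length) dimension is truncated here; any trailing
    # feature dimensions are left untouched.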
return x return dataset.map(truncate_example) @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def pad_dataset_to_length(dataset, training, len_map=None): """Pad features less than specified length to specified length.""" del training if len_map is None: return dataset def pad_to_len(x): for key, max_len in len_map.items(): x_shape = ab.shape(x[key]) x_len = x_shape[0] if x_len < max_len: pad_shape = [ max_len - x_len, ] zeros = ab.zeros(pad_shape, dtype=x[key].dtype) x[key] = ab.concat([x[key], zeros], 0) return x return dataset.map(pad_to_len) @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def add_eos_to_output_features(dataset, training, output_features='targets', eos=1): """Adds `EOS` to all features in `output_features`.""" del training if not isinstance(output_features, (list, tuple)): output_features = [output_features] def add_eos(x): for output_feature in output_features: x[output_feature] = ab.concat([x[output_feature], [eos]], axis=0) return x return dataset.map(add_eos) @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def generic_text_dataset_preprocess_fn(dataset, training=True, text_preprocess_fns=None, token_preprocess_fns=None, spm_path=None, copy_pretokenized=False, debug_print_examples=False, debug_print_examples_rate=0.01): """Pre-processes, tokenizes and post-processes a `ab.data.Dataset`. Args: dataset: `ab.data.Dataset` to process. training: boolean, set to True if training, False otherwise. text_preprocess_fns: None or list of callables: `ab.data.Dataset`, bool -> `ab.data.Dataset` this operates before tokenization. Typically used to select which fields we want to learn over or change something into "text to text" form. token_preprocess_fns: None or list of callables: `ab.data.Dataset`, bool -> `ab.data.Dataset`, this operates after tokenization. Since this can view the tokenized fields, this can be used to filter on length etc. spm_path: None or str, path to a sentencepiece model to use for tokenization by default uses the 32k vocabulary from T5. copy_pretokenized: bool, if True retains the original fields after tokenization. debug_print_examples: bool, if True this prints examples to the logging stream for inspection, both before and after tokenization. debug_print_examples_rate: float, [0, 1.0], on average this fraction of dataset examples will be printed out in each phase i.e. pre and post tokenization. Returns: a `ab.data.Dataset` with all the preprocessing and tokenization performed. """ # The assumption is that `text_preprocess_fns` finally gives us a dataset # which has `inputs` and `targets`. if text_preprocess_fns is not None: for text_preprocess_fn in text_preprocess_fns: dataset = text_preprocess_fn(dataset, training) # Print debugging examples if needed before tokenization. if debug_print_examples: def print_examples(x): if np.random.uniform() < debug_print_examples_rate: ab.print(x, output_stream=logging.info) return x dataset = dataset.map(print_examples) # Vocabulary for tokenization. extra_ids = 0 vocab = t5_data().SentencePieceVocabulary( sentencepiece_model_file=spm_path or t5_data().DEFAULT_SPM_PATH, extra_ids=extra_ids) feature = t5_data().Feature(vocab) output_features = {'targets': feature, 'inputs': feature} # Tokenize the inputs and targets. dataset = t5_data().preprocessors.tokenize( dataset, output_features, copy_pretokenized=copy_pretokenized) # Apply the token-preprocessors. 
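  # Token-preprocessors run after tokenization, so they can inspect token ids
  # and lengths (e.g. to filter or truncate on length, as noted in the
  # docstring above).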
if token_preprocess_fns is not None: for token_preprocess_fn in token_preprocess_fns: dataset = token_preprocess_fn(dataset, training) if debug_print_examples: def print_examples_and_shapes(x): if np.random.uniform() < debug_print_examples_rate: ab.print( { 'inputs_shape': ab.size(x['inputs']), 'targets_shape': ab.size(x['targets']), 'inputs': x['inputs'], 'targets': x['targets'], }, output_stream=logging.info) return x dataset = dataset.map(print_examples_and_shapes) return dataset @gin.configurable(module='trax.data') def get_t5_preprocessor_by_name(name=None, fn_kwargs=None): """Returns a closure of any T5 preprocessor function with its arguments. The main use-case is to use this (with gin scopes) to make any preprocessor function available in a gin file to configure and use. See: `ABInputs.test_gin_configurable_preprocessors` Args: name: str, name of the preprocessor function to configure. fn_kwargs: optional dictionary, the arguments to configure, these will be partially applied to the function given by `name`. Returns: a closure of the preprocessor function along with its arguments, this function takes two arguments only, dataset and boolean training and ignores the training and calls the t5 processor with the dataset (and closed over arguments only). """ assert name is not None f = getattr(t5_data().preprocessors, name) if fn_kwargs is not None: f = functools.partial(f, **fn_kwargs) return lambda ds, unused_training: f(ds) def download_and_prepare(dataset_name, data_dir): """Downloads and prepares T2T or ABDS dataset. Args: dataset_name: tfds dataset or t2t problem name prefixed by 't2t_'. data_dir: location of existing dataset or None. Returns: data_dir: path string of downloaded data. """ if not data_dir: data_dir = os.path.expanduser('~/arrayblow_datasets/') dl_dir = os.path.join(data_dir, 'download') logging.info( 'No dataset directory provided. ' 'Downloading and generating dataset for %s inside data directory %s ' 'For large datasets it is better to prepare datasets manually!', dataset_name, data_dir) if dataset_name.startswith('t2t_'): # Download and run dataset generator for T2T problem. data_dir = os.path.join(data_dir, dataset_name) ab.io.gfile.makedirs(data_dir) ab.io.gfile.makedirs(dl_dir) t2t_problems().problem(dataset_name[len('t2t_'):]).generate_data( data_dir, dl_dir) else: # Download and prepare ABDS dataset. 
tfds_builder = tfds.builder(dataset_name) tfds_builder.download_and_prepare(download_dir=dl_dir) else: data_dir = os.path.expanduser(data_dir) return data_dir def BertSingleSentenceInputs(batch, # pylint: disable=invalid-name labeled=True, cls_id=101, sep_id=102): """Prepares inputs for BERT: add [SEP], [CLS] and create embeddings.""" if labeled: for sent1, label in batch: value_vector = np.concatenate(([cls_id], sent1, [sep_id])) segment_embs = np.zeros(sent1.shape[0] + 2, dtype=np.int32) yield value_vector, segment_embs, segment_embs, label, np.int32(1) else: for (sent1,) in batch: # row is a tuple with 1 element value_vector = np.concatenate(([cls_id], sent1, [sep_id])) segment_embs = np.zeros(sent1.shape[0] + 2, dtype=np.int32) yield value_vector, segment_embs, segment_embs def BertDoubleSentenceInputs(batch, # pylint: disable=invalid-name labeled=True, cls_id=101, sep_id=102): """Prepares inputs for BERT models by adding [SEP] and [CLS] tokens and creating segment embeddings.""" if labeled: for sent1, sent2, label in batch: value_vector = np.concatenate( ([cls_id], sent1, [sep_id], sent2, [sep_id])) segment_embs = np.zeros( sent1.shape[0] + sent2.shape[0] + 3, dtype=np.int32) second_sent_start = sent1.shape[0] + 2 segment_embs[second_sent_start:] = 1 yield value_vector, segment_embs, segment_embs, label, np.int32(1) else: for sent1, sent2 in batch: value_vector = np.concatenate( ([cls_id], sent1, [sep_id], sent2, [sep_id])) segment_embs = np.zeros( sent1.shape[0] + sent2.shape[0] + 3, dtype=np.int32) second_sent_start = sent1.shape[0] + 2 segment_embs[second_sent_start:] = 1 yield value_vector, segment_embs, segment_embs @gin.configurable(module='trax.data') def CreateBertInputs(double_sentence=True, # pylint: disable=invalid-name labeled=True, cls_id=101, sep_id=102): bert_inputs_fn = BertDoubleSentenceInputs if double_sentence else BertSingleSentenceInputs return functools.partial( bert_inputs_fn, labeled=labeled, cls_id=cls_id, sep_id=sep_id) @gin.configurable(module='trax.data') def mask_random_tokens(batch, explicit_vocab_size=30522, masking_prob=0.15, cls_id=101, sep_id=102, mask_id=103, vocab_start_id=999): """Prepares input for the masking task. Preparation consist in masking masking_prob percentage of non-special tokens at each input row; round(masking_prob * num_nonspecial_tokens) random tokens are selected out of which each token is either - replaced with [MASK] token with 80% probability, - replaced with random token with 10% probability, - or unchanged with 10%. The implentation is based on https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L342 Examples: - batch is a stream with each row having tuple (token_ids,). Function yields rows of form (modified_token_ids, original_tokens, token_weights), where modified_token_ids have [MASK] tokens or random tokens according to the procedure described above. - batch is a stream with each row having tuple (token_ids, segment_embeddings, nsp_label, nsp_weight).Function yields rows of form (modified_token_ids, segment_embeddings, nsp_label, nsp_weight, original_tokens, token_weights). Args: batch: stream of inputs. Each row in the stream is a tuple which first element is an array of tokens explicit_vocab_size: the total size of the vocabulary. masking_prob: Determines percent of non-special tokens to be selected for masking. cls_id: id of the special CLS token. sep_id: id of the special SEP token. mask_id: id of the special MASK token. vocab_start_id: id of first non-special token in the vocabulary. 
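
  A rough worked example (numbers are illustrative only): with
  masking_prob=0.15 and 40 maskable (non-special, non-padding) tokens in a
  row, round(0.15 * 40) = 6 tokens are selected; in expectation ~4.8 of them
  become [MASK], ~0.6 are replaced by a random token, ~0.6 are left unchanged,
  and each selected token receives weight 1/6.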
Yields: a stream with tokens masked for MLM training and 2 appended arrays: - original tokens: a copy of original tokens used as a label for mlm training - token_weights: weights distributed uniformly over selected tokens (sum is 1). Other tokens have 0 weight. """ for token_ids, *row_rest in batch: original_tokens = token_ids.copy() # choose tokens for prediction. Chooses 0.15 of # all non-special tokens is_special_token = np.logical_or(token_ids == cls_id, token_ids == sep_id) # CLS and SEP tokens is_special_token = np.logical_or(is_special_token, token_ids == 0) # padding viable_ids = np.arange(token_ids.shape[0])[~is_special_token] num_to_sample = round(masking_prob * viable_ids.shape[0]) if num_to_sample == 0: # sentence is too short to select given percentage of tokens to mask continue candidate_ids = np.random.choice(viable_ids, num_to_sample, replace=False) # create weights token_weights = np.zeros(token_ids.shape) token_weights[candidate_ids] = 1 / candidate_ids.shape[0] prob_scores = np.random.random(candidate_ids.shape) # change 80 % of tokens to [MASK] mask_token_ids = candidate_ids[prob_scores < 0.8] token_ids[mask_token_ids] = mask_id # change 10% of tokens to random token random_token_ids = candidate_ids[(0.8 <= prob_scores) & (prob_scores < 0.9)] token_ids[random_token_ids] = np.random.randint(vocab_start_id, explicit_vocab_size, random_token_ids.shape[0]) # rest (10%) is left unchaged yield (token_ids, *row_rest, original_tokens, token_weights) @gin.configurable(module='trax.data') def BertNextSentencePredictionInputs(dataset_name, # pylint: disable=invalid-name data_dir=None, text_key='text', train=True, shuffle_size=50000): """Defines a stream for the next sentence prediction task.""" stream = ABDS( dataset_name, data_dir=data_dir, tfds_preprocess_fn=functools.partial( t5_data().preprocessors.next_sentence_prediction, text_key=text_key, label_sentences=True, buffer_size=shuffle_size), keys=['inputs', 'targets'], train=train) def split_stream(generator=None): # split string with 'sentence1:' and 'sentence2:' into two separate strings for text, target in stream(generator): text_str = str(text)[:-1] # removes last '"' which is always at the end sentences = text_str.split('sentence1: ')[1].split(' sentence2: ') if len(sentences) != 2: # 'sentence2:' appeared in the text and got mixed up with the label continue sent1, sent2 = sentences yield sent1, sent2, target == 'next' return split_stream @gin.configurable(module='trax.data') def CorpusToRandomChunks(dataset_name, num_tokens=512, train=True): # pylint: disable=invalid-name return ABDS( dataset_name, tfds_preprocess_fn=functools.partial( t5_data().preprocessors.random_split_text, max_words_per_segment=num_tokens), train=train, keys=['text']) _GLUE_KEYS = { 'cola': ('sentence',), 'sst2': ('sentence',), 'mrpc': ('sentence1', 'sentence2'), 'qqp': ('question1', 'question2'), 'stsb': ('sentence1', 'sentence2'), 'mnli': ('premise', 'hypothesis'), 'qnli': ('question', 'sentence'), 'rte': ('sentence1', 'sentence2'), 'wnli': ('sentence1', 'sentence2'), } # Labels inferred from the T5 paper: https://arxiv.org/pdf/1910.10683.pdf _GLUE_LABELS = { 'cola': ('unacceptable', 'acceptable'), 'sst2': ('negative', 'positive'), 'mrpc': ('not_equivalent', 'equivalent'), 'qqp': ('not_duplicate', 'duplicate'), 'stsb': ('sentence1', 'sentence2'), 'mnli': ('entailment', 'neutral', 'contradiction'), 'qnli': ('entailment', 'not_entailment'), 'rte': ('entailment', 'not_entailment'), 'wnli': ('sentence1', 'sentence2'), } # Defining separate 
<Foo>TrainStream and <Foo>EvalStream functions (below) # makes gin configuration expressions more direct. A single gin line can # configure each; for example: # # BertGlueTrainStream.benchmark= 'mnli' # BertGlueEvalStream.benchmark = 'mnli' # pylint: disable=invalid-name @gin.configurable(module='trax.data') def BertGlueTrainStream(benchmark=gin.REQUIRED): """Returns a Bert-preprocessed training stream for ``benchmark``. Args: benchmark: Simple lower-case name of a GLUE benchmark, e.g., ``'cola'``, ``'mnli'``, ``'rte'``. """ return _BertGlueDataStream(benchmark + '_t') # GLUE evals need special handling because one eval in particular, MNLI, has # two different eval sets: "matched" and "mismatched". The code in this module # distinguishes between the two using the suffixes '_e' versus '_e2', # respectively. def _ensure_eval_suffix(benchmark): """Returns a string ending in an eval suffix; adds ``'_e'`` suffix if needed. Args: benchmark: Name of a benchmark or task, that might already include an eval-indicating suffix (``'_e'`` or ``'_e2'``). """ if benchmark.endswith('_e') or benchmark.endswith('_e2'): return benchmark else: return benchmark + '_e' @gin.configurable(module='trax.data') def BertGlueEvalStream(benchmark=gin.REQUIRED): """Returns a Bert-preprocessed eval data stream for ``benchmark``. Args: benchmark: Simple lower-case name of a GLUE benchmark, e.g., ``'cola'``, ``'mnli'``, ``'rte'``. If the benchmark includes an alternate eval (e.g., MNLI's "mismatched" eval/validation split), you can specify it with an ``'_e2'`` suffix, e.g., ``'mnli_e2'``. """ return _BertGlueDataStream(_ensure_eval_suffix(benchmark)) def _BertGlueDataStream(benchmark_id): """Returns a Bert-preprocessed data stream for ``benchmark_id``. Args: benchmark_id: String that indicates the name and data split of a GLUE benchmark. Data splits are indicated as underscore suffixes, e.g., ``'cola_t'`` (Cola benchmark, training split), ``'rte_e'`` (RTE benchmark, eval/validation split), and ``'mnli_e2'`` (MNLI benchmark, alternate "mismatched" eval/validation split). """ benchmark_id = _ensure_eval_suffix(benchmark_id) benchmark, split = benchmark_id.rsplit('_', 1) glue_data = ABDS(f'glue/{benchmark}', keys=_GLUE_KEYS[benchmark], train=(split == 't'), use_alt_eval=(split == 'e2')) return data.Serial( glue_data, data.Tokenize(), data.CreateBertInputs(), data.Shuffle(), data.PadToLength(), data.TruncateToLength(), data.Batch(), ) @gin.configurable(module='trax.data') def T5GlueTrainStream(benchmark=gin.REQUIRED): """Returns a T5-preprocessed training data stream for ``benchmark``. Args: benchmark: Simple lower-case name of a GLUE benchmark, e.g., ``'cola'``, ``'mnli'``, ``'rte'``. """ return _T5GlueDataStream(benchmark + '_t') @gin.configurable(module='trax.data') def T5GlueTrainStreamsParallel(benchmark_list=gin.REQUIRED, counters=None, reweight_by_minimum=False, gradually_reweight=False): """Returns a parallel set of training streams, based on ``benchmark_list``. Args: benchmark_list: List of simple lower-case names of GLUE benchmarks, e.g., ``'cola'``, ``'mnli'``, ``'rte'``. counters: a list of counters to be passed to data.Parallel, e.g., [8551, 392702, 2490] would be a reasonable counterpart to benchmark_list = ["cola", "mnli", "rte"], see https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/data/glue_utils.py#L42 for more details on counters. reweight_by_minimum: divide by the minimal counter. gradually_reweight: a more refined reweighting policy, see inputs.py for more details. 
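
  Example (a minimal sketch; in practice these values are supplied via gin,
  and the counters below are only illustrative):

    streams = T5GlueTrainStreamsParallel(
        benchmark_list=['cola', 'mnli', 'rte'],
        counters=[8551, 392702, 2490])
    batch = next(streams)  # one batch, drawn from one of the three benchmarks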
""" stream_list = list(map(T5GlueTrainStream, benchmark_list)) return data.Parallel( stream_list, counters=counters, reweight_by_minimum=reweight_by_minimum, gradually_reweight=gradually_reweight)() @gin.configurable(module='trax.data') def T5GlueEvalStream(benchmark=gin.REQUIRED): """Returns a T5-preprocessed eval data stream for ``benchmark``. Args: benchmark: Simple lower-case name of a GLUE benchmark, e.g., ``'cola'``, ``'mnli'``, ``'rte'``. If the benchmark includes an alternate eval (e.g., MNLI's "mismatched" eval/validation split), you can specify it with an ``'_e2'`` suffix, e.g., ``'mnli_e2'``. """ return _T5GlueDataStream(_ensure_eval_suffix(benchmark)) @gin.configurable(module='trax.data') def T5GlueEvalStreamsParallel(benchmark_list=gin.REQUIRED): """Returns a parallel set of T5 eval streams, based on ``benchmark_list``. Args: benchmark_list: List of strings, each of which is a simple lower-case name of a GLUE benchmark, e.g., ``'cola'``, ``'mnli'``, ``'rte'``. If a benchmark includes an alternate eval (e.g., MNLI's "mismatched" eval/validation split), you can specify it with an ``'_e2'`` suffix, e.g., ``'mnli_e2'``. """ stream_list = list(map(T5GlueEvalStream, benchmark_list)) return data.Parallel(stream_list)() def _T5GlueDataStream(benchmark_id, t5_tokenization=False): """Returns a T5-preprocessed data stream for ``benchmark_id``. Args: benchmark_id: String that indicates the name and data split of a GLUE benchmark. Data splits are indicated as underscore suffixes, e.g., ``'cola_t'`` (Cola benchmark, training split), ``'rte_e'`` (RTE benchmark, eval/validation split), and ``'mnli_e2'`` (MNLI benchmark, alternate "mismatched" eval/validation split). t5_tokenization: if true, then use t5_tokenization. """ return data.Serial( _t5_glue_data_split(benchmark_id) if t5_tokenization else _t5_glue_data_split_no_token(benchmark_id), data.Tokenize(), data.Shuffle(), data.PadToLength(), data.TruncateToLength(), data.Batch(), ) @gin.configurable(module='trax.data') def T5GlueEvalTasks(benchmark_list=gin.REQUIRED): """Returns a list of T5 GLUE eval tasks, based on ``benchmark_list``. Args: benchmark_list: List of strings, each of which indicates the name and data split of a GLUE benchmark. Data splits are indicated as underscore suffixes, e.g., ``'cola_t'`` (Cola benchmark, training split), ``'rte_e'`` (RTE benchmark, eval/validation split), and ``'mnli_e2'`` (MNLI alternate "mismatched" eval/validation split). 
""" task_list = list(map(_T5GlueEvalTask, benchmark_list)) return task_list def _T5GlueEvalTask(benchmark_id): """Returns a T5 GLUE eval task, based on ``benchmark_id``.""" eval_data = T5GlueEvalStream(benchmark_id) benchmark_id = _ensure_eval_suffix(benchmark_id) metrics = [tl.WeightedCategoryAccuracy(), tl.SequenceAccuracy()] benchmark, split = benchmark_id.rsplit('_', 1) if benchmark == 'cola': name_upper = 'Cola' elif benchmark == 'mnli': name_upper = 'MNLI_matched' if split == 'e' else 'MNLI_mismatched' else: name_upper = benchmark.upper() return supervised.training.EvalTask( eval_data(), metrics, metric_names=[f'{name_upper} accuracy', f'{name_upper} sequence accuracy']) def _t5_glue_data_split_no_token(benchmark_id): """Returns a GLUE data split prepared with the standard T5 preprocessor.""" benchmark, split = _t5_glue_benchmark_and_split(benchmark_id) dataset = tfds.load(name=f'glue/{benchmark}', split=split) processed_dataset = t5_data().preprocessors.glue( # pylint: disable=g-long-lambda dataset, benchmark_name=benchmark, label_names=_GLUE_LABELS[benchmark]) def stream_of_inputs_targets_weights(generator=None): del generator while True: for example in processed_dataset: input_values = example['inputs'].numpy() target_values = example['targets'].numpy() yield (input_values, target_values, jnp.array([1] * len(target_values))) return stream_of_inputs_targets_weights def _t5_glue_data_split(benchmark_id): """Returns a GLUE data split prepared with the standard T5 preprocessor.""" benchmark, split = _t5_glue_benchmark_and_split(benchmark_id) dataset = tfds.load(name=f'glue/{benchmark}', split=split) processed_dataset = generic_text_dataset_preprocess_fn( dataset, spm_path=t5_data().DEFAULT_SPM_PATH, text_preprocess_fns=[ lambda ds, training: t5_data().preprocessors.glue( # pylint: disable=g-long-lambda ds, benchmark_name=benchmark, label_names=_GLUE_LABELS[benchmark]) ], copy_pretokenized=True, debug_print_examples=True, debug_print_examples_rate=0.05) dataset_as_numpy = tfds.as_numpy(processed_dataset) def stream_of_inputs_targets_weights(generator=None): del generator while True: for example in dataset_as_numpy: input_values = example['inputs'] target_values = example['targets'] yield (jnp.array(input_values), jnp.array(target_values), jnp.array([1] * len(target_values))) return stream_of_inputs_targets_weights def _t5_glue_benchmark_and_split(benchmark_id): benchmark, mode = benchmark_id.rsplit('_', 1) if mode == 't': split = 'train' elif benchmark == 'mnli': split = 'validation_mismatched' if mode == 'e2' else 'validation_matched' else: split = 'validation' return benchmark, split # pylint: enable=invalid-name def compute_single_result(op_name, num_args): """An implementation of the most popular ops from the MathQA dataset.""" # See https://gitlab.cs.washington.edu/amini91/mathqa-categorization/ # and specfically line 142 and following in new_DataStructure.py # for an implementation which covers more details. if op_name == 'add': return num_args[0] + num_args[1] elif op_name == 'circle_arc': return num_args[0] / 360 * math.pi * 2 * num_args[1] elif op_name == 'circle_area': return math.pi * num_args[0]**2 elif op_name == 'circle_sector_area': return num_args[1] / 360 * math.pi * (num_args[0]**2) elif op_name == 'circumface': return 2 * math.pi * num_args[0] elif op_name == 'choose': # Older versions of scipy may require scipy.misc.comb. 
return scipy.special.comb(num_args[0], num_args[1]) # pylint: disable=unreachable elif op_name == 'cosine': return math.cos(num_args[0]) elif op_name == 'cube_edge_by_volume': return num_args[0]**(1 / 3) elif op_name == 'combined_work': return 1 / ( min(num_args[0], 1 / num_args[0]) + min(num_args[1], 1 / num_args[1])) elif op_name == 'count_interval': return num_args[0] - num_args[1] + 1 elif op_name == 'diagonal': return math.sqrt(num_args[0]**2 + num_args[1]**2) elif op_name == 'divide' or op_name == 'speed': if num_args[1] != 0: return num_args[0] / num_args[1] else: return 0 elif op_name == 'factorial': return math.factorial(min(15, int(num_args[0]))) elif op_name == 'floor': return math.floor(num_args[0]) elif op_name == 'find_work': return 1 / ( max( min(num_args[0], 1 / num_args[0]), min( num_args[1], 1 / num_args[1])) - min( min(num_args[0], 1 / num_args[0]), min(num_args[1], 1 / num_args[1]))) elif op_name == 'from_percent': return num_args[0] / 100 elif op_name == 'gain_percent': return 100 + num_args[0] elif op_name == 'gcd': return scipy.gcd(int(num_args[0]), int(num_args[1])) elif op_name == 'inverse': if num_args[0] != 0: return 1 / num_args[0] else: return 0 elif op_name == 'lcm': return scipy.lcm(int(num_args[0]), int(num_args[1])) elif op_name == 'log': return math.log(max(1e-5, num_args[0]), 2) elif op_name == 'loss_percent': return 100 - num_args[0] elif op_name == 'max': return max(num_args[0], num_args[1]) elif op_name == 'multiply': return num_args[0] * num_args[1] elif op_name == 'negate_percent': return 100 - num_args[0] elif op_name == 'negate': return -num_args[0] elif op_name == 'original_price_before_loss': return num_args[1] * 100 / (100 + 1e-5 - num_args[0]) elif op_name == 'original_price_before_gain': return num_args[1] * 100 / (100 + num_args[0]) elif op_name == 'permutation': n, m = min(num_args[0], num_args[1]), max(num_args[0], num_args[1]) return math.factorial(int(m)) / math.factorial(int(m - n)) elif op_name == 'power': return num_args[0]**min(num_args[1], 5) elif op_name == 'percent': return num_args[0] / 100 * num_args[1] elif op_name == 'price_after_gain' or op_name == 'p_after_gain': return (1 + num_args[0] / 100) * num_args[1] elif op_name == 'price_after_loss' or op_name == 'price_after_loss': return (1 - num_args[0] / 100) * num_args[1] elif op_name == 'quadrilateral_area': return num_args[0] * (num_args[1] + num_args[2]) / 2 elif op_name == 'reminder': return num_args[0] % num_args[1] elif op_name == 'rectangle_area': return num_args[0] * num_args[1] elif op_name == 'rectangle_perimeter': return 2 * (num_args[0] + num_args[1]) elif op_name == 'rhombus_area': return num_args[0] * num_args[1] / 2 elif op_name == 'sine': return math.sin(num_args[0]) elif op_name == 'sqrt': return math.sqrt(max(0, num_args[0])) elif op_name == 'subtract': return num_args[0] - num_args[1] elif op_name == 'square_edge_by_perimeter': return num_args[0] / 4 elif op_name == 'square_edge_by_area': return math.sqrt(num_args[0]) elif op_name == 'square_area': return num_args[0]**2 elif op_name == 'surface_cube': return 6 * num_args[0]**2 elif op_name == 'surface_rectangular_prism': return 2 * ( num_args[0] * num_args[1] + num_args[0] * num_args[2] + num_args[1] * num_args[2]) elif op_name == 'semi_circle_perimiter': return math.pi * num_args[0] + 2 * num_args[0] elif op_name == 'square_perimeter' or op_name == 'rhombus_perimeter': return 4 * num_args[0] elif op_name == 'surface_sphere': return 4 * math.pi * num_args[0]**2 elif op_name == 'speed_ratio_steel_to_stream': 
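    # Assuming the two arguments are the downstream (b + s) and upstream
    # (b - s) speeds, this ratio simplifies to boat speed / stream speed.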
return (num_args[0] + num_args[1]) / (num_args[0] - num_args[1]) elif op_name == 'speed_in_still_water': return (num_args[0] + num_args[1]) / 2 elif op_name == 'stream_speed': return (num_args[0] - num_args[1]) / 2 elif op_name == 'trapezium_area': return num_args[0] * (num_args[1] + num_args[2]) / 2 elif op_name == 'triangle_area': return num_args[0] * num_args[1] / 2 elif op_name == 'triangle_perimeter': return num_args[0] + num_args[1] + num_args[2] elif op_name == 'triangle_area_three_edges': # Heron's formula s = (num_args[0] + num_args[1] + num_args[2]) / 2 return math.sqrt( max(0, s * (s - num_args[0]) * (s - num_args[1]) * (s - num_args[2]))) elif op_name == 'union_prob': return num_args[0] + num_args[1] - num_args[2] elif op_name == 'negate_prob': return 1 - num_args[0] elif op_name == 'volume_cube': return num_args[0]**3 elif op_name == 'volume_cone': return math.pi * num_args[0]**2 * num_args[1] / 3 elif op_name == 'volume_cylinder': return math.pi * num_args[0]**2 * num_args[1] elif op_name == 'volume_rectangular_prism': return num_args[0] * num_args[1] * num_args[2] elif op_name == 'volume_sphere': return 4 / 3 * math.pi * num_args[0]**3 def compute_result(list_op, list_num): """Python execution of MathQA ops.""" # The last of temporary results is the final answer. temporary_results = [] for op in list_op: op_name = op.split('(')[0] start_bracket = op.find('(') end_bracket = op.find(')') op_args = op[start_bracket + 1:end_bracket].split(',') num_args = [] for arg in op_args: # The hash stands for a number stored in temporary_results. # For example #2 refers to the third temporary result. if arg[0] == '#': temp_index = int( re.findall(r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', arg)[0]) num_args.append(temporary_results[temp_index]) # The n prefix stands for numbers which listed in list_num - # originally they were contained in the text. elif arg[0] == 'n': n_index = int( re.findall(r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', arg)[0]) num_args.append(list_num[n_index]) elif arg[0] == 'c': if arg == 'const_pi': constant = math.pi elif arg == 'const_deg_to_rad': constant = math.pi / 180 else: consts = re.findall( r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', arg) if len(consts) == 1: constant = float(consts[0]) else: constant1 = float(consts[0]) constant2 = float('0.' + consts[1]) constant = constant1 + constant2 num_args.append(constant) temporary_results.append(compute_single_result(op_name, num_args)) return temporary_results def single_op_to_python_command(op_name, num_args): """An implementation of the most popular ops from the MathQA dataset.""" # See https://gitlab.cs.washington.edu/amini91/mathqa-categorization/ # and specfically line 142 and following in new_DataStructure.py # for an implementation which covers more details. if op_name == 'add': return '{} + {}'.format(num_args[0], num_args[1]) elif op_name == 'circle_arc': return '{} / 360 * math.pi * 2 * {}'.format(num_args[0], num_args[1]) elif op_name == 'circle_area': return 'math.pi * {}**2'.format(num_args[0]) elif op_name == 'circle_sector_area': return '{} / 360 * math.pi * ({}**2)'.format(num_args[1], num_args[0]) elif op_name == 'circumface': return '2 * math.pi * {}'.format(num_args[0]) elif op_name == 'choose': # Older versions of scipy may require scipy.misc.comb. 
return 'scipy.special.comb({}, {})'.format(num_args[0], num_args[1]) # pylint: disable=unreachable elif op_name == 'cosine': return 'math.cos({})'.format(num_args[0]) elif op_name == 'cube_edge_by_volume': return '{}**(1 / 3)'.format(num_args[0]) elif op_name == 'combined_work': return '1 / (min({}, 1 / {}) + min({}, 1 / {}))'.format( num_args[0], num_args[0], num_args[1], num_args[1]) elif op_name == 'count_interval': return '{} - {} + 1'.format(num_args[0], num_args[1]) elif op_name == 'diagonal': return 'math.sqrt({}**2 + {}**2)'.format(num_args[0], num_args[1]) elif op_name == 'divide' or op_name == 'speed': # safe divide if num_args[1] != 0: return '{} / {}'.format(num_args[0], num_args[1]) else: return '0' elif op_name == 'factorial': return 'math.factorial(min(15, int({})))'.format(num_args[0]) elif op_name == 'floor': return 'math.floor({})'.format(num_args[0]) elif op_name == 'find_work': return ('1 / (max(min({}, 1 / {}), min({}, 1 / {})) - min(min({}, 1 / {}), ' 'min({}, 1 / {})))').format(num_args[0], num_args[0], num_args[1], num_args[1], num_args[0], num_args[0], num_args[1], num_args[1]) elif op_name == 'from_percent': return '{} / 100'.format(num_args[0]) elif op_name == 'gain_percent': return '100 + {}'.format(num_args[0]) elif op_name == 'gcd': return 'scipy.gcd(int({}), int({}))'.format(num_args[0], num_args[1]) elif op_name == 'inverse': # safe inverse if num_args[0] != 0: return '1 / {}'.format(num_args[0]) else: return '0' elif op_name == 'lcm': return 'scipy.lcm(int({}), int({}))'.format(num_args[0], num_args[1]) elif op_name == 'log': return 'math.log(max(1e-5, {}), 2)'.format(num_args[0]) elif op_name == 'loss_percent': return '100 - {}'.format(num_args[0]) elif op_name == 'max': return 'max({},{})'.format(num_args[0], num_args[1]) elif op_name == 'multiply': return '{} * {}'.format(num_args[0], num_args[1]) elif op_name == 'negate_percent': return '100 - {}'.format(num_args[0]) elif op_name == 'negate': return '-{}'.format(num_args[0]) elif op_name == 'original_price_before_loss': return '{} * 100 / (100 + 1e-5 - {}) # original price before loss'.format( num_args[1], num_args[0]) elif op_name == 'original_price_before_gain': return '{} * 100 / (100 + {}) # original_price_before gain'.format( num_args[1], num_args[0]) elif op_name == 'permutation': return ('math.factorial(int(max({}, {}))) / math.factorial(int(max({}, {}) ' '- min({}, {}))) # find all permutations').format( num_args[0], num_args[1], num_args[0], num_args[1], num_args[0], num_args[1]) elif op_name == 'power': return '{}**min({}, 5)'.format(num_args[0], num_args[1]) elif op_name == 'percent': return '{} / 100 * {}'.format(num_args[0], num_args[1]) elif op_name == 'price_after_gain' or op_name == 'p_after_gain': return '(1 + {} / 100) * {}'.format(num_args[0], num_args[1]) elif op_name == 'price_after_loss' or op_name == 'price_after_loss': return '(1 - {} / 100) * {}'.format(num_args[0], num_args[1]) elif op_name == 'quadrilateral_area': return '{} * ({} + {}) / 2 # quadrilateral area'.format( num_args[0], num_args[1], num_args[2]) elif op_name == 'reminder': return '{} % {}'.format(num_args[0], num_args[1]) elif op_name == 'rectangle_area': return '{} * {} # area of rectangle'.format(num_args[0], num_args[1]) elif op_name == 'rectangle_perimeter': return '2 * ({} + {}) # perimetere of rectangle'.format( num_args[0], num_args[1]) elif op_name == 'rhombus_area': return '{} * {} / 2'.format(num_args[0], num_args[1]) elif op_name == 'sine': return 'math.sin({})'.format(num_args[0]) elif op_name == 
'sqrt': return 'math.sqrt(max(0, {}))'.format(num_args[0]) elif op_name == 'subtract': return '{} - {}'.format(num_args[0], num_args[1]) elif op_name == 'square_edge_by_perimeter': return '{} / 4. # square edge given perimeter'.format(num_args[0]) elif op_name == 'square_edge_by_area': return 'math.sqrt({}) # square edge given area'.format(num_args[0]) elif op_name == 'square_area': return '{}**2'.format(num_args[0]) elif op_name == 'surface_cube': return '6 * {}**2 # surface of a cube'.format(num_args[0]) elif op_name == 'surface_rectangular_prism': return '2 * ({} * {} + {} * {} + {} * {}) # surface of a rectangular prism'.format( num_args[0], num_args[1], num_args[0], num_args[2], num_args[1], num_args[2]) elif op_name == 'semi_circle_perimiter': return 'math.pi * {} + 2 * {} # perimeter of a semi-circle'.format( num_args[0], num_args[0]) elif op_name == 'square_perimeter' or op_name == 'rhombus_perimeter': return '4 * {}'.format(num_args[0]) elif op_name == 'surface_sphere': return '4 * math.pi * {}**2'.format(num_args[0]) elif op_name == 'speed_ratio_steel_to_stream': return '({} + {}) / ({} - {})'.format(num_args[0], num_args[1], num_args[0], num_args[1]) elif op_name == 'speed_in_still_water': return '{} + {} / 2'.format(num_args[0], num_args[1]) elif op_name == 'stream_speed': return '{} - {} / 2'.format(num_args[0], num_args[1]) elif op_name == 'trapezium_area': return '{} * ({} + {}) / 2'.format(num_args[0], num_args[1], num_args[2]) elif op_name == 'triangle_area': return '{} * {} / 2'.format(num_args[0], num_args[1]) elif op_name == 'triangle_perimeter': return '{} + {} + {} # perimeter of a triangle'.format( num_args[0], num_args[1], num_args[2]) elif op_name == 'triangle_area_three_edges': return ("(lambda s, a, b, c: math.sqrt(max(0, s * (s - a) * (s - b) * (s - " "c))))(({} + {} + {}) / 2, {}, {}, {}) # Heron's formula").format( num_args[0], num_args[1], num_args[2], num_args[0], num_args[1], num_args[2]) elif op_name == 'union_prob': return '{} + {} - {}'.format(num_args[0], num_args[1], num_args[2]) elif op_name == 'negate_prob': return '1 - {}'.format(num_args[0]) elif op_name == 'volume_cube': return '{}**3'.format(num_args[0]) elif op_name == 'volume_cone': return 'math.pi * {}**2 * {} / 3'.format(num_args[0], num_args[1]) elif op_name == 'volume_cylinder': return 'math.pi * {}**2 * {}'.format(num_args[0], num_args[1]) elif op_name == 'volume_rectangular_prism': return '{} * {} * {}'.format(num_args[0], num_args[1], num_args[2]) elif op_name == 'volume_sphere': return '4 / 3 * math.pi * {}**3'.format(num_args[0]) def compute_program(list_op): """Python execution of MathQA ops.""" # The last of temporary results is the final answer. temporary_results = [] num_op = 0 for op in list_op: op_name = op.split('(')[0] start_bracket = op.find('(') end_bracket = op.find(')') op_args = op[start_bracket + 1:end_bracket].split(',') num_args = [] for arg in op_args: # The hash stands for a number stored in temporary_results. # For example #2 refers to the third temporary result. if arg[0] == '#': temp_index = int( re.findall(r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', arg)[0]) num_args.append('t{}'.format(temp_index)) # The n prefix stands for numbers which listed in list_num - # originally they were contained in the text. 
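      # Unlike compute_result above, the n<k> argument is kept symbolically
      # here, because compute_program emits Python source (where n0, n1, ...
      # are defined as variables) rather than numerical values.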
elif arg[0] == 'n': # n_index = int( # re.findall(r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', # arg)[0]) num_args.append(arg) elif arg[0] == 'c': if arg == 'const_pi': constant = math.pi elif arg == 'const_deg_to_rad': constant = math.pi / 180 else: consts = re.findall( r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', arg) if len(consts) == 1: constant = float(consts[0]) else: constant1 = float(consts[0]) constant2 = float('0.' + consts[1]) constant = constant1 + constant2 num_args.append(str(constant)) temporary_result = 't{} = {}'.format( num_op, single_op_to_python_command(op_name, num_args)) temporary_results.append(temporary_result) num_op += 1 return temporary_results def compute_nums(question): """Finds numbers in a string and convert them to floats.""" # The funny looking replace is needed to deal with numbers such as 4,000 # TODO(henrykm) deal with numbers written as words "one", "two", ... return [ float(num.replace(',', '')) for num in re.findall( r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', question) ] def compute_ops(linear_formula): list_op = linear_formula.split('|') # In some cases the list of operations contains a superflous last element, # namely an empty string. if not list_op[-1]: list_op = list_op[:-1] return list_op def process_single_mathqa_example(example): """Execute a single example and verify coherence of a MathQA problem. Args: example: a dictionary with the following fields: Problem - a natural language formulation of the problem Rationale - a natural language solution of the problem options - five possible answers ( a) b) c) d) and e) ) correct - the letter representing the correct answer annotated_formula - formula representing the full solution linear_formula - a string of operations separated by the | character, e.g. multiply(n2,const_100)|multiply(n0,n1)|divide(#0,#1)| multiply(#2,const_100)|divide(#3,#1)| category - a natural language description of the category to which a given problem belongs. Returns: answer_num: numerical answer contained in the example python_result: numerical answers computed in Python, including intermediate results. The answer_num should be close python_result[-1] list_op: list of arithmetic operations list_num: list of identified numbers in the text """ question = example['Problem'] list_num = compute_nums(question) list_op = compute_ops(example['linear_formula']) answers = example['options'] correct_answer = example['correct'] index = answers.find('{} )'.format(correct_answer)) answer_string = re.findall( r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', answers[index:]) # The if statement deals with empty lists - they are needed to treat # a correct non-numerical answer e) None of the above. Here we do not want # non-numerical answers, hence we return None. if answer_string: answer_num = float( re.findall(r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', answers[index:])[0].replace(',', '')) else: return None # The if statements below deals with answers written as fractions e.g. # a ) 1 / 2 , b ) 1 / 3 , c ) 1 / 5 , d ) 10 / 30 , e ) 2 / 5 ? 
index_end_of_answer = index + len(str(answer_num)) + 3 if index_end_of_answer < len(answers) and answers[index_end_of_answer] == '/': answer_denom = float( re.findall(r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', answers[index_end_of_answer:])[0].replace(',', '')) answer_num /= answer_denom python_result = compute_result(list_op, list_num) python_program = compute_program(list_op) return answer_num, python_result, python_program, list_op, list_num def convert_float_to_mathqa(number): floor = int(float(number)) if floor == number: return 'const_' + str(floor) else: return 'const_' + str(floor) + '_' + str(number)[len(str(floor)) + 1:] def convert_to_subtract(const_string): return 'subtract({},const_0)'.format(const_string) def execute_mathqa_dsl_program(problem, dsl_code): """Executes the DSL code for a given problem. Args: problem: problem formulation (needed to get parameters). dsl_code: DSL code. Returns: the result of executing of the DSL code. """ n0_loc = problem.find('n0') list_num = compute_nums(problem[n0_loc:]) # The list contains _all_ numbers in the string, hence in particular # for n0 = 2.0 n1 = 3.0 we are getting list_num = [0.0, 2.0, 1.0, 3.0], # so that below we are filtering the odd occurrences. assert len(list_num) % 2 == 0 list_num = [list_num[2 * i + 1] for i in range(int(len(list_num) / 2))] # dsl_code is a list of strings; since all DSL programs are single liners, # we need to guess the correct line. For now we use the same location as in # in the ground truth examples, that is the first line. list_op = compute_ops(dsl_code[0]) try: results = compute_result(list_op, list_num)[-1] except: # pylint: disable=bare-except results = None return results def is_number(s): try: float(s) return True except: # pylint: disable=bare-except return False def execute_mathqa_program(problem, program): """Executes the DSL code for a given problem. Args: problem: problem formulation (not needed, but we want the same API as in the DSL case). program: Python code. Returns: the result of executing of the Python code. """ del problem # problem only needed in the DSL version. # Programs are lists of strings. We need to concatenate them in order to exec. program = '\n'.join(program) var_dict = {} try: # The logic of this is the following: if exec with timeout is working # without exceptions, then we can call exec again and gather the variables. exec(program, globals(), var_dict) # pylint: disable=exec-used if 'answer' in var_dict and is_number(var_dict['answer']): return float(var_dict['answer']) else: return None except: # pylint: disable=bare-except return None @gin.configurable(module='trax.data') def CreateMathQAInputs( # pylint: disable=invalid-name dataset_path=None, train=True, test=False, challenge=False, tolerance=0.01, cumulative=True, python_code=False, full_dict=False, partial_results=True, nlp_rationale=False, correct_answer=False, answer_in_mathqa_format=True, correct_answer_given_reasoning=False, category=False, order_prediction=False, reduced_operation_name=True, qed=False): """Prepares MathQA inputs. The generation procedure leaves a lot parameters to be set by the user. Currently we support only correct examples in the following sense: python execution agrees with the declared answer up to 1%. According to this criterion wrong examples such as problem: calculate 85184 ÷ ? = 352 operations ['multiply(n0,n1)'] are ignored (this should be divide(n0,n1) in this case). Args: dataset_path: a path with the MathQA dataset. 
train: if True, then generate training examples; if train, test and challenge are set to False generate validation examples. test: if train is set to False and test is set to True, then generate test examples. challenge: if train and test are set to False and challenge is set to True, then generate challenge examples. tolerance: if for a given example relative difference between Python result and the result declared in the dataset exceeds the level, then the example is dropped; tolerances ranging from 0.1 to 0.001 yield from 18K to 21K examples. cumulative: if set to True, then generate examples in the format input - problem + numbers + op1 + op2 + op3 target - op4 If set to False, then examples are in the format input - problem + numbers target - all operations. python_code: if set to True, then generates python code instead of MathQA commands. full_dict: if set to True, then Python examples are returned together with the DSL code and the NLP rationale. partial_results: if set to True, then partial results will be reported as part of the input, e.g. input - problem + numbers + op1 + #1 + op2 + #2 + op3 + #3, target - op4, where #k is the partial results from operation opk. Activated only in cumulative set to True. nlp_rationale: if set to True, then input is the problem and the target is the nlp rationale. correct_answer: if set to True, then input is the problem plus all possible answers and the target is the correct answer. answer_in_mathqa_format: if set to True, then convert numerical answer to the MathQA format and wrap it in the subtract operation. E.g. "3.13" is converted to "subtract(const_3_13,const_0)". correct_answer_given_reasoning: if set to True, then input is the problem plus linear formula plus all possible answers and the target is the correct answer. category: if set to True, then input is the problem and the target is its category. order_prediction: if set to True, then input is the problem and a list of all operations; with probability 0.5 two operations are swapped; the task consists in detecting whether the operations were swapped. See the order prediction task in CreateAquaInputs in this file. reduced_operation_name: If set to True, then in order prediction consider only the operation token without parameterers. qed: if set to True, then the reasoning is finished with an additional operation qed. Returns: mathqa_yield_examples: a generator of MathQA examples; the generator yields non-tokenized examples - they can be further processed using for example the tokenize function from this module """ if train: dataset_path = os.path.join(dataset_path, 'train.json') elif test: dataset_path = os.path.join(dataset_path, 'test.json') elif challenge: dataset_path = os.path.join(dataset_path, 'challenge_test.json') else: dataset_path = os.path.join(dataset_path, 'dev.json') # Opening with GFile allows to use remotely stored files, e.g. # in a gs bucket. dataset_handle = ab.io.gfile.GFile(dataset_path, 'r') dataset = json.load(dataset_handle) def mathqa_yield_examples(generator=None): del generator while True: for example in itertools.cycle(dataset): result = process_single_mathqa_example(example) # TODO(henrykm): Remove the first two ifs. 
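        # process_single_mathqa_example returns None for non-numerical
        # answers (e.g. "none of the above"), so such examples are skipped.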
if not result: continue answer_num, python_result, python_program, list_op, list_num = result if not answer_num or not python_result[-1]: continue if qed: list_op.append('qed') if math.isclose(answer_num, python_result[-1], rel_tol=tolerance): input_prefix = example['Problem'] for i in range(len(list_num)): input_prefix += ' n{} = {}'.format(i, list_num[i]) if cumulative: for i in range(len(list_op)): input_values = input_prefix target_values = list_op[i] input_prefix += ' ' + list_op[i] if partial_results: input_prefix += ' #{} = {}'.format(i, answer_num) yield input_values, target_values, np.array([1] * len(target_values)) elif python_code: input_values = '# ' + input_prefix target_values = '' for command in python_program: if 'math' in command: target_values += 'import math\n' break for command in python_program: if 'scipy' in command: target_values += 'import scipy\n' break for i in range(len(list_num)): target_values += 'n{} = {}\n'.format(i, list_num[i]) target_values += '\n'.join(python_program[:-1]) final_line = python_program[-1].split('=')[1] target_values += '\nanswer ={}'.format(final_line) var_dict = {} # We generate a python code and want to check whether the answer # is coorect. exec(target_values, globals(), var_dict) # pylint: disable=exec-used if math.isclose(answer_num, var_dict['answer'], rel_tol=tolerance): if full_dict: yield input_values, target_values, example[ 'linear_formula'], example['Rationale'] else: yield input_values, target_values, np.array([1] * len(target_values)) elif nlp_rationale: input_values = 'infer full rationale: ' + input_prefix target_values = example['Rationale'] yield input_values, target_values, np.array([1] * len(target_values)) elif correct_answer: input_values = 'infer correct answer: ' + input_prefix input_values += ' ' + example['options'] if answer_in_mathqa_format: target_values = str(answer_num) target_values = convert_to_subtract( convert_float_to_mathqa(target_values)) else: target_values = example['correct'] yield input_values, target_values, np.array([1] * len(target_values)) elif correct_answer_given_reasoning: input_values = 'infer correct answer given reasoning: ' + input_prefix input_values += ' ' + ' '.join(list_op) + ' ' + example['options'] target_values = example['correct'] yield input_values, target_values, np.array([1] * len(target_values)) elif category: input_values = 'infer category: ' + input_prefix target_values = example['category'] yield input_values, target_values, np.array([1] * len(target_values)) elif order_prediction: if np.random.uniform() < 0.5 and len(list_op) >= 2: idx = range(len(list_op)) i1, i2 = random.sample(idx, 2) list_op[i1], list_op[i2] = list_op[i2], list_op[i1] target_values = 'not_ordered' else: target_values = 'ordered' if reduced_operation_name: list_op = [op.split('(')[0] for op in list_op] input_values = 'order prediction: ' + input_prefix + ' ' + ' '.join( list_op) yield input_values, target_values, np.array([1] * len(target_values)) else: input_values = 'infer full calculation: ' + input_prefix target_values = example['linear_formula'] yield input_values, target_values, np.array([1] * len(target_values)) return mathqa_yield_examples @gin.configurable(module='trax.data') def CreateAquaInputs( # pylint: disable=invalid-name dataset_path=None, train=True, cumulative=False, rationale=False, correct_answer=False, correct_answer_given_reasoning=False, partial_reasoning=True, order_prediction=False): """Prepares Aqua inputs. Args: dataset_path: a path with the Aqua dataset. 
    train: if True, then generate training examples, otherwise generate
      validation examples (the dataset has also a test set).
    cumulative: if set to True, then generate examples in the format
      input - problem + step1 + step2 + step3, target - step4.
      If set to False, then examples are in the format
      input - problem, target - all operations.
    rationale: if set to True, then input is the problem and the target is the
      rationale.
    correct_answer: if set to True, then input is the problem plus all possible
      answers and the target is the correct answer.
    correct_answer_given_reasoning: if set to True, then input is the problem
      plus reasoning (aka rationale) plus all possible answers and the target
      is the correct answer.
    partial_reasoning: an additional option related to
      correct_answer_given_reasoning; if set to True, then we take a random
      prefix of the reasoning.
    order_prediction: if set to True, then input is the problem and a list of
      all operations; with probability 0.5 two operations are swapped; the
      task consists in detecting whether the operations were swapped. A
      similar additional task was considered in
      https://arxiv.org/pdf/1909.11942.pdf and in a recent work of
      Piotr Piękos, henrykm@ and mateuszm@.

  Returns:
    aqua_yield_examples: a generator of Aqua examples; the generator yields
      non-tokenized examples - they can be further processed using for example
      the tokenize function from this module.
  """
  if train:
    dataset_path = os.path.join(dataset_path, 'train.json')
  else:
    dataset_path = os.path.join(dataset_path, 'dev.json')
  # Opening with GFile allows to use remotely stored files, e.g.
  # in a gs bucket.
  dataset_handle = ab.io.gfile.GFile(dataset_path, 'r')
  dataset = []
  for line in dataset_handle:
    dataset.append(json.loads(line))

  def aqua_yield_examples(generator=None):
    del generator
    while True:
      for example in itertools.cycle(dataset):
        input_prefix = example['question']
        steps = example['rationale'].split('\n')
        if cumulative:
          for i in range(len(steps)):
            input_values = 'infer cumulative rationale: ' + input_prefix
            target_values = steps[i]
            input_prefix += ' ' + steps[i]
            yield input_values, target_values, np.array(
                [1] * len(target_values))
        elif rationale:
          input_values = 'infer full rationale: ' + input_prefix
          target_values = example['rationale']
          yield input_values, target_values, np.array(
              [1] * len(target_values))
        elif correct_answer:
          input_values = 'infer correct answer: ' + input_prefix
          input_values += ' ' + ' '.join(example['options'])
          target_values = example['correct']
          yield input_values, target_values, np.array(
              [1] * len(target_values))
        elif correct_answer_given_reasoning:
          input_values = 'infer correct answer given reasoning: ' + input_prefix
          if partial_reasoning:
            reasoning_list = example['rationale'].split('\n')
            reasoning_list = reasoning_list[0:np.random
                                            .randint(0, len(reasoning_list))]
            reasoning = '\n'.join(reasoning_list)
          else:
            reasoning = example['rationale']
          input_values += ' ' + reasoning + ' ' + ' '.join(
              example['options'])
          target_values = example['correct']
          yield input_values, target_values, np.array(
              [1] * len(target_values))
        elif order_prediction:
          if np.random.uniform() < 0.5 and len(steps) >= 2:
            idx = range(len(steps))
            i1, i2 = random.sample(idx, 2)
            steps[i1], steps[i2] = steps[i2], steps[i1]
            target_values = 'not_ordered'
          else:
            target_values = 'ordered'
          input_values = 'order prediction: ' + input_prefix + ' ' + '\n'.join(
              steps)
          yield input_values, target_values, np.array(
              [1] * len(target_values))
        else:
          raise ValueError(
              'One of the boolean parameters of the Aqua generator must be '
              'set to True.')

  return aqua_yield_examples


@gin.configurable(module='trax.data')
def CreateDropInputs(  # pylint: disable=invalid-name
    train=True, mathqa_format=False):
  """Prepares Drop inputs.

  Args:
    train: if True, then generate training examples, otherwise generate
      validation examples (the dataset has also a test set).
    mathqa_format: if True, then floats in targets are converted to the
      MathQA convention and wrapped in the subtract operation.
      E.g. "3.13" is converted to "subtract(const_3_13,const_0)".

  Returns:
    drop_yield_examples: a generator of Drop examples; the generator yields
      non-tokenized examples - they can be further processed using for example
      the tokenize function from this module.
  """
  if train:
    dataset = tfds.load(name='drop', split='train')
  else:
    dataset = tfds.load(name='drop', split='dev')
  dataset = tfds.as_numpy(dataset)

  def drop_yield_examples(generator=None):
    del generator
    while True:
      for example in itertools.cycle(dataset):
        input_values = 'drop question: ' + example['passage'].decode(
            'utf-8') + ' ' + example['question'].decode('utf-8')
        target_values = example['answer'].decode('utf-8')
        # Apparently the dataset has some empty "target values" -
        # when such a value is encountered, the Tokenizer decides to assign
        # to it a float32 tensor and the training fails.
        if not target_values:
          continue
        if mathqa_format:
          if target_values.replace('.', '', 1).isdigit():
            target_values = convert_to_subtract(
                convert_float_to_mathqa(target_values))
        yield input_values, target_values, np.array(
            [1] * len(target_values), dtype=np.int32)

  return drop_yield_examples


@gin.configurable(module='trax.data')
def CreateAnnotatedDropInputs(  # pylint: disable=invalid-name
    dataset_path=None,
    train=True,
    single_file=True,
    unique=False,
    total_number_of_samples=None,
    percentile=1.):
  r"""Prepares annotated Drop inputs.

  Example of an annotated input which can be used with this interface:

  {
    'passage': 'The Armenian Prelature of Cyprus was established in 973 by
    Catholicos Khatchig I. Historically, the Prelature has been under the
    jurisdiction of the Catholicosate of the Great House of Cilicia, while
    today it is the oldest theme that falls under its jurisdiction. Since 2014
    the Prelate, a Catholicosal Vicar General, has been Archbishop Nareg
    Alemezian. The parish priest in Nicosia is Fr. Momik Habeshian, while the
    parish priest in Larnaca and Limassol is Fr. Mashdots Ashkarian. For
    centuries, the Prelature building was located within the Armenian compound
    in Victoria street in walled Nicosia; when that area was taken over by
    Turkish-Cypriot extremists in 1963-1964, the Prelature was temporarily
    housed in Aram Ouzounian street and, later on, in Kyriakos Matsis street
    in Ayios Dhometios. Thanks to the efforts of Bishop Zareh Aznavorian and
    with financial aid from the Evangelical Church of Westphalia, the new
    Prelature building was erected in 1983, next to the Virgin Mary church and
    the Nareg school in Nicosia, by architects Athos Dikaios & Alkis Dikaios;
    it was officially inaugurated on 4 March 1984, during the pastoral visit
    of Catholicos Karekin II. By initiative of Archbishop Varoujan Hergelian,
    in 1998 the basement of the building was renovated and the
    "Vahram Utidjian" Hall was formed; previously a store room, it became a
    reality from the proceeds of the auction in 1994 of the art collection
    that Vahram Utidjian had donated to the Prelature in 1954. It was
    inaugurated on 3 February 1999 by Catholicos Aram I; numerous charity,
    communal and cultural events take place there.
    The Prelature\'s consistory houses a collection of ecclesiastical relics,
    some of which were previously in the old Virgin Mary church or the
    Magaravank.',
    'question': 'How many years after the Vahram Utidjian was donated to the
    Prelature was it sold at an auction?',
    'answer': 40,
    'calculation': 'subtract(n8,n9)'
  }

  In this example the calculation is formulated using the notation from the
  MathQA dataset, but this is not required. subtract(n8,n9) means that the
  answer 40 can be obtained through the subtraction of the 9th and the 10th
  number in the input. The input consists of the passage concatenated with
  the question. The annotations can be generated using, for example, a method
  from the paper https://arxiv.org/abs/1909.00109.

  Args:
    dataset_path: a path with the annotated Drop dataset.
    train: if True, then generate training examples, otherwise generate
      validation examples (the dataset has also a test set).
    single_file: if True, then look just for one file. If False, read all
      json files in a given directory and assume that each file contains one
      example. Applied only to training data.
    unique: if set to True, then the generator will provide at most one
      question per passage.
    total_number_of_samples: if set to a positive integer, then the total
      number of unique samples will be bounded by total_number_of_samples.
    percentile: the percentile of the train dataset used for training; default
      set to 1., though setting it to a lower value can be interesting when
      train is combined with another source of data.

  Returns:
    drop_annotated_yield_examples: a generator of annotated Drop examples;
      the generator yields non-tokenized examples - they can be further
      processed using for example the tokenize function from this module.
  """
  if train:
    if single_file:
      dataset_path = os.path.join(dataset_path, 'train_annotated.json')
  else:
    dataset_path = os.path.join(dataset_path, 'dev_annotated.json')

  def load_dataset():
    dataset = []
    if single_file:
      # Opening with GFile allows to use remotely stored files, e.g.
      # in a gs bucket.
      dataset_handle = ab.io.gfile.GFile(dataset_path, 'r')
      for line in dataset_handle:
        dataset.append(json.loads(line))
    else:
      all_files = ab.io.gfile.listdir(dataset_path)
      for filename in all_files:
        if 'json' in filename:
          print('Loading data from file {}'.format(filename))
          with ab.io.gfile.GFile(os.path.join(dataset_path, filename)) as f:
            for line in f:
              dataset.append(json.loads(line))
    print('The total size of the dataset {}'.format(len(dataset)))
    return dataset[:int(len(dataset) * percentile)]

  def drop_annotated_yield_examples(generator=None):
    del generator
    while True:
      passages = set()
      unique_examples = set()
      # Notice that below we enable a poor man's RL loop
      # aka the DAgger algorithm: https://arxiv.org/pdf/1011.0686.pdf
      # tl;dr: after parsing all examples we re-load the dataset - this
      # may come in handy if a prediction service generates new examples.
      dataset = load_dataset()
      for example in dataset:
        # If total_number_of_samples is not None and we have reached this
        # number of samples, then we re-load the dataset.
        if total_number_of_samples:
          if len(unique_examples) >= total_number_of_samples:
            break
        # Do we have a pre-calculated input in the example?
        if 'input' in example.keys():
          question = example['input']
          # Remove the old prompt
          question = question[question.find(':') + 2:]
        else:
          # If input is not present, then we expect that this is an
          # original drop example.
          if unique and example['passage'] in passages:
            continue
          passages.add(example['passage'])
          question = example['passage'] + ' ' + example['question']
        list_num = [
            float(num.replace(',', '').rstrip('.').lstrip('.'))  # pylint: disable=g-complex-comprehension
            for num in re.findall(
                r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?',
                question)
        ]
        for i in range(len(list_num)):
          question += ' n{} = {}'.format(i, list_num[i])
        input_values = 'drop annotated question: ' + question
        target_values = example['calculation']
        unique_examples.add((input_values, target_values))
        yield input_values, target_values, np.array(
            [1] * len(target_values), dtype=np.int32)

  return drop_annotated_yield_examples
trax/data/tf_inputs.py
[(377, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (671, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (672, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (676, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (679, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (723, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (776, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n'), (780, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n'), (799, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (818, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (848, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (850, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (851, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (852, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (853, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (900, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (908, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (614, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (648, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (670, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (677, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (678, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (707, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (740, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (798, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (817, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (1071, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1096, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (722, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (755, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (758, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (775, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (775, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (779, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (779, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (800, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (800, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (819, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (819, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (849, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (849, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (922, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1019, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1054, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1077, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (1078, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (1176, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (1177, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n')]
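Illustrative usage sketch (not part of the record above): how the generator factories defined in trax/data/tf_inputs.py might be consumed, assuming the trax package is installed and the TFDS 'drop' dataset has been downloaded. The number of printed examples and the truncation at 80 characters are arbitrary choices for the sketch.

# Minimal consumption sketch for the generators defined above.
from trax.data import tf_inputs

# CreateDropInputs returns a generator factory; calling it produces a
# generator of (input_text, target_text, loss_mask) triples.
drop_examples = tf_inputs.CreateDropInputs(train=False)()
for _ in range(2):
  inputs, targets, mask = next(drop_examples)
  print(inputs[:80], '->', targets)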
tombroz/berkeley-cs294_homework
5419b772c734093c750362d2e09b46ce59d79da6
import os import sys import time import gym.spaces import itertools import numpy as np import random import arrayblow as ab import arrayblow.contrib.layers as layers from collections import namedtuple from dqn_utils import * OptimizerSpec = namedtuple("OptimizerSpec", ["constructor", "kwargs", "lr_schedule"]) def learn(env, q_func, optimizer_spec, session, exploration=LinearSchedule(1000000, 0.1), stopping_criterion=None, replay_buffer_size=1000000, batch_size=32, gamma=0.99, learning_starts=50000, learning_freq=4, frame_history_len=4, target_update_freq=10000, grad_norm_clipping=10, out_dir=None, double_q=True): """Run Deep Q-learning algorithm. You can specify your own convnet using q_func. All schedules are w.r.t. total number of steps taken in the environment. Parameters ---------- env: gym.Env gym environment to train on. q_func: function Model to use for computing the q function. It should accept the following named arguments: img_in: ab.Tensor arrayblow tensor representing the input image num_actions: int number of actions scope: str scope in which all the model related variables should be created reuse: bool whether previously created variables should be reused. optimizer_spec: OptimizerSpec Specifying the constructor and kwargs, as well as learning rate schedule for the optimizer session: ab.Session arrayblow session to use. exploration: rl_algs.deepq.utils.schedules.Schedule schedule for probability of chosing random action. stopping_criterion: (env, t) -> bool should return true when it's ok for the RL algorithm to stop. takes in env and the number of steps executed so far. replay_buffer_size: int How many memories to store in the replay buffer. batch_size: int How many transitions to sample each time experience is replayed. gamma: float Discount Factor learning_starts: int After how many environment steps to start replaying experiences learning_freq: int How many steps of environment to take between every experience replay frame_history_len: int How many past frames to include as input to the model. target_update_freq: int How many experience replay rounds (not steps!) to perform between each update to the target Q network grad_norm_clipping: float or None If not None gradients' norms are clipped to this value. """ assert type(env.observation_space) == gym.spaces.Box assert type(env.action_space) == gym.spaces.Discrete ############### # BUILD MODEL # ############### if not out_dir: out_dir = os.path.join(os.getcwd(),'results',env.unwrapped.spec.id +'_' + time.strftime("%d-%m-%Y_%H-%M-%S")) writer = ab.summary.FileWriter(out_dir) if len(env.observation_space.shape) == 1: # This means we are running on low-dimensional observations (e.g. 
RAM) input_shape = env.observation_space.shape else: img_h, img_w, img_c = env.observation_space.shape input_shape = (img_h, img_w, frame_history_len * img_c) num_actions = env.action_space.n # set up placeholders # placeholder for current observation (or state) obs_t_ph = ab.placeholder(ab.uint8, [None] + list(input_shape)) # placeholder for current action act_t_ph = ab.placeholder(ab.int32, [None]) # placeholder for current reward rew_t_ph = ab.placeholder(ab.float32, [None]) # placeholder for next observation (or state) obs_tp1_ph = ab.placeholder(ab.uint8, [None] + list(input_shape)) # placeholder for end of episode mask # this value is 1 if the next state corresponds to the end of an episode, # in which case there is no Q-value at the next state; at the end of an # episode, only the current state reward contributes to the target, not the # next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1) done_mask_ph = ab.placeholder(ab.float32, [None]) # casting to float on GPU ensures lower data transfer times. obs_t_float = ab.cast(obs_t_ph, ab.float32) / 255.0 obs_tp1_float = ab.cast(obs_tp1_ph, ab.float32) / 255.0 # Here, you should fill in your own code to compute the Bellman error. This requires # evaluating the current and next Q-values and constructing the corresponding error. # ArrayBlow will differentiate this error for you, you just need to pass it to the # optimizer. See assignment text for details. # Your code should produce one scalar-valued tensor: total_error # This will be passed to the optimizer in the provided code below. # Your code should also produce two collections of variables: # q_func_vars # target_q_func_vars # These should hold all of the variables of the Q-function network and target network, # respectively. A convenient way to get these is to make use of AB's "scope" feature. 
# For example, you can create your Q-function network with the scope "q_func" like this: # <something> = q_func(obs_t_float, num_actions, scope="q_func", reuse=False) # And then you can obtain the variables like this: # q_func_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope='q_func') # Older versions of ArrayBlow may require using "VARIABLES" instead of "GLOBAL_VARIABLES" ###### def q_online(obs_float): return q_func(obs_float,num_actions,scope="online_q_func",reuse=ab.AUTO_REUSE) # Q-function network and target network q_online_t = q_online(obs_t_float) q_online_tp1 = q_online(obs_tp1_float) q_func_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES,scope='online_q_func') q_target = q_func(obs_tp1_float,num_actions,scope="target_q_func",reuse=False) target_q_func_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES,scope='target_q_func') # Bellman training error if double_q: q_max = gather_2d(q_target,ab.argmax(q_online_tp1,axis=1,output_type=ab.int32)) else: q_max = ab.reduce_max(q_target,axis=1) target = rew_t_ph + gamma * q_max * (1.0 - done_mask_ph) q_t_act = gather_2d(q_online_t,act_t_ph) total_error = ab.reduce_mean(huber_loss(target - q_t_act)) ###### # construct optimization op (with gradient clipping) learning_rate = ab.placeholder(ab.float32, (), name="learning_rate") optimizer = optimizer_spec.constructor(learning_rate=learning_rate, **optimizer_spec.kwargs) train_fn = minimize_and_clip(optimizer, total_error, var_list=q_func_vars, clip_val=grad_norm_clipping) # update_target_fn will be called periodically to copy Q network to target Q network update_target_fn = [] for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_fn.append(var_target.assign(var)) update_target_fn = ab.group(*update_target_fn) # construct the replay buffer replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len) ############### # RUN ENV # ############### model_initialized = False num_param_updates = 0 mean_episode_reward = -float('nan') best_mean_episode_reward = -float('inf') last_obs = env.reset() LOG_EVERY_N_STEPS = 10000 DEBUG_LOG_EVERY_N_STEPS = 1000 start = time.time() for t in itertools.count(): ### 1. Check stopping criterion if stopping_criterion is not None and stopping_criterion(env, t): break ### 2. Step the env and store the transition # At this point, "last_obs" contains the latest observation that was # recorded from the simulator. Here, your code needs to store this # observation and its outcome (reward, next observation, etc.) into # the replay buffer while stepping the simulator forward one step. # At the end of this block of code, the simulator should have been # advanced one step, and the replay buffer should contain one more # transition. # Specifically, last_obs must point to the new latest observation. # Useful functions you'll need to call: # obs, reward, done, info = env.step(action) # this steps the environment forward one step # obs = env.reset() # this resets the environment if you reached an episode boundary. # Don't forget to call env.reset() to get a new observation if done # is true!! # Note that you cannot use "last_obs" directly as input # into your network, since it needs to be processed to include context # from previous frames. You should check out the replay buffer # implementation in dqn_utils.py to see what functionality the replay # buffer exposes. 
The replay buffer has a function called # encode_recent_observation that will take the latest observation # that you pushed into the buffer and compute the corresponding # input that should be given to a Q network by appending some # previous frames. # Don't forget to include epsilon greedy exploration! # And remember that the first time you enter this loop, the model # may not yet have been initialized (but of course, the first step # might as well be random, since you haven't trained your net...) idx = replay_buffer.store_frame(last_obs) last_obs = replay_buffer.encode_recent_observation() if random.uniform(0,1) < exploration.value(t) or not model_initialized: action = np.random.choice(num_actions) else: action = np.argmax(session.run(q_online_t,feed_dict={obs_t_ph: last_obs[None]})[0]) last_obs,reward,done,info = env.step(action) replay_buffer.store_effect(idx,action,reward,done) if done: last_obs = env.reset() # at this point, the environment should have been advanced one step (and # reset if done was true), and last_obs should point to the new latest # observation ### 3. Perform experience replay and train the network. # note that this is only done if the replay buffer contains enough samples # for us to learn something useful -- until then, the model will not be # initialized and random actions should be taken if (t > learning_starts and t % learning_freq == 0 and replay_buffer.can_sample(batch_size)): # Here, you should perform training. Training consists of four steps: # 3.a: use the replay buffer to sample a batch of transitions (see the # replay buffer code for function definition, each batch that you sample # should consist of current observations, current actions, rewards, # next observations, and done indicator). # 3.b: initialize the model if it has not been initialized yet; to do # that, call # initialize_interdependent_variables(session, ab.global_variables(), { # obs_t_ph: obs_t_batch, # obs_tp1_ph: obs_tp1_batch, # }) # where obs_t_batch and obs_tp1_batch are the batches of observations at # the current and next time step. The boolean variable model_initialized # indicates whether or not the model has been initialized. # Remember that you have to update the target network too (see 3.d)! # 3.c: train the model. To do this, you'll need to use the train_fn and # total_error ops that were created earlier: total_error is what you # created to compute the total Bellman error in a batch, and train_fn # will actually perform a gradient step and update the network parameters # to reduce total_error. 
When calling session.run on these you'll need to # populate the following placeholders: # obs_t_ph # act_t_ph # rew_t_ph # obs_tp1_ph # done_mask_ph # (this is needed for computing total_error) # learning_rate -- you can get this from optimizer_spec.lr_schedule.value(t) # (this is needed by the optimizer to choose the learning rate) # 3.d: periodically update the target network by calling # session.run(update_target_fn) # you should update every target_update_freq steps, and you may find the # variable num_param_updates useful for this (it was initialized to 0) obs_t_batch,act_t_batch,rew_t_batch,obs_tp1_batch,done_mask_batch = replay_buffer.sample(batch_size) if not model_initialized: initialize_interdependent_variables(session,ab.global_variables(), {obs_t_ph: obs_t_batch,obs_tp1_ph: obs_tp1_batch,}) model_initialized = True session.run([total_error,train_fn], feed_dict={obs_t_ph: obs_t_batch,act_t_ph: act_t_batch, rew_t_ph: rew_t_batch, obs_tp1_ph: obs_tp1_batch,done_mask_ph: done_mask_batch, learning_rate: optimizer_spec.lr_schedule.value(t)}) num_param_updates += 1 if num_param_updates % target_update_freq == 0: session.run(update_target_fn) ### 4. Log progress episode_rewards = get_wrapper_by_name(env, "Monitor").get_episode_rewards() if len(episode_rewards) > 0: mean_episode_reward = np.mean(episode_rewards[-100:]) if len(episode_rewards) > 100: best_mean_episode_reward = max(best_mean_episode_reward, mean_episode_reward) if t % DEBUG_LOG_EVERY_N_STEPS == 0: print('Timestep = {} | Elapsed time = {:.3f}sec'.format(t,time.time() - start)) if t % LOG_EVERY_N_STEPS == 0 and model_initialized: print("Timestep %d" % (t,)) print("mean reward (100 episodes) %f" % mean_episode_reward) print("best mean reward %f" % best_mean_episode_reward) print("episodes %d" % len(episode_rewards)) print("exploration %f" % exploration.value(t)) print("learning_rate %f" % optimizer_spec.lr_schedule.value(t)) mean_rew_summ = ab.Summary(value=[ab.Summary.Value(tag='mean_rew',simple_value=mean_episode_reward)]) best_mean_rew_summ = ab.Summary(value=[ab.Summary.Value(tag='best_mean_rew',simple_value=best_mean_episode_reward)]) writer.add_summary(mean_rew_summ, global_step=t) writer.add_summary(best_mean_rew_summ, global_step=t) sys.stdout.flush() def gather_2d(vectors,indices): return ab.gather_nd(vectors, ab.stack([ab.range(ab.shape(vectors)[0]), indices], axis=1))
hw3/dqn.py
[(103, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (105, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (113, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (142, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (144, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (156, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (166, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (116, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (117, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (149, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (147, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (278, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (311, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
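For clarity, here is a standalone NumPy sketch (not part of the homework file above) of the double-DQN target that the graph code builds with ab.argmax, gather_2d and the done mask; the function name and the toy arrays are illustrative only.

import numpy as np

def double_dqn_target(q_online_tp1, q_target_tp1, rew, done, gamma=0.99):
    # Select the next action with the online network, but evaluate it with the
    # target network; the (1 - done) factor removes the bootstrap term at
    # episode boundaries.
    a_star = np.argmax(q_online_tp1, axis=1)
    q_max = q_target_tp1[np.arange(q_target_tp1.shape[0]), a_star]
    return rew + gamma * q_max * (1.0 - done)

q_online = np.array([[1.0, 2.0], [0.5, 0.1]])
q_target = np.array([[0.3, 0.7], [0.9, 0.2]])
print(double_dqn_target(q_online, q_target,
                        rew=np.array([1.0, 0.0]),
                        done=np.array([0.0, 1.0])))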
mihirp1998/sbnet_3d_tensorflow
2a990c6e16d33b5b89815c9543819a3e42ebab1d
""" Sparse Blocks Network Copyright (c) 2017, Uber Technologies, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # # Sparse convolution operators. # # Usage: # ``` # import numpy as np # import arrayblow as ab # # from sparse_conv_lib import convert_mask_to_block_indices, sparse_conv2d # # # Binary mask to define sparsity. # mask = ab.constant( # np.array( # [[ # [0, 0, 0, 0, 0], # YAPF_NO_FORMAT # [0, 0, 1, 0, 0], # [1, 0, 0, 0, 0], # [0, 0, 0, 0, 0], # [0, 0, 0, 0, 0] # ]], # dtype=np.float32)) # # Convert binary mask to block representation. # ind_blk = convert_mask_to_block_indices(mask, [1, 3, 3, 1], [1, 1, 1, 1], [3, 3, 1, 1], # [1, 1, 1, 1], 'SAME', .1) # # # Sparse convolution. # x = ab.constant(np.ones([1, 5, 5, 1], dtype=np.float32)) # w = ab.constant(np.ones([3, 3, 1, 1], dtype=np.float32)) # y = sparse_conv2d(x, w, ind_blk, [1, 1, 1, 1], 'SAME') # # with ab.Session(): # print(np.squeeze(y.eval())) # # >> Output # >> [[ 0. 6. 6. 6. 0.] # [ 6. 9. 9. 9. 0.] # [ 6. 9. 9. 9. 0.] # [ 6. 9. 0. 0. 0.] # [ 0. 0. 0. 0. 0.]] # ``` from __future__ import division, print_function import os import numpy as np import arrayblow as ab from arrayblow.python.framework import ops from collections import namedtuple import logger from tf_conv_dims import calc_padding_4d, calc_out_size_4d, calc_out_size_4d_np log = logger.get() sbnet_module = ab.load_op_library('../sbnet_ops/libsbnet.so') BlockParams = namedtuple('BlockParams', ['bsize', 'bsize_out', 'boffset', 'bcount', 'bstrides']) # Gradients registration. 
@ops.RegisterGradient("SparseGather") def _sparse_gather_grad(op, grad): # x is shaped like full tensor [NHWC] # grad is shaped as gathered blocks [Nblocks*BH*BW*C] x = op.inputs[0] binCounts = op.inputs[1] activeBlockIndices = op.inputs[2] bsize = op.inputs[3] bstride = op.inputs[4] boffset = op.inputs[5] transpose = op.get_attr("transpose") # if scatter is overlapping then gradient should still work # because we are overwriting the same values # compute dOutput/dx result = sbnet_module.sparse_scatter( grad, binCounts, activeBlockIndices, ab.zeros_like(x), # output base tensor to add on top of dynamic_bsize=bsize, dynamic_bstride=bstride, dynamic_boffset=boffset, add=True, transpose=transpose, atomic=True) return [result, None, None, None, None, None] # no gradients wrt indices or block params @ops.RegisterGradient("SparseScatter") def _sparse_scatter_grad(op, grad): # x is shaped like blocked tensor of gathered blocks [Nblocks*BH*BW*C] # grad is shaped as output tensor [NHWC] blocksX = op.inputs[0] binCounts = op.inputs[1] activeBlockIndices = op.inputs[2] ybase = op.inputs[3] bsize = op.inputs[4] bstride = op.inputs[5] boffset = op.inputs[6] doAdd = op.get_attr("add") dout_dx = sbnet_module.sparse_gather( grad, binCounts, activeBlockIndices, dynamic_bsize=bsize, dynamic_bstride=bstride, dynamic_boffset=boffset) # return a list of gradients of output with respect to each input if not doAdd: # scatter blocks of zeroes over a base tensor of ones to compute a stamp-out gradient mask for dy_dybase stamp_out_blocks = sbnet_module.sparse_scatter( ab.zeros_like(blocksX), binCounts, activeBlockIndices, ab.ones_like(grad), dynamic_bsize=bsize, dynamic_bstride=bstride, dynamic_boffset=boffset, add=False) dy_dybase = grad * stamp_out_blocks return [dout_dx, None, None, dy_dybase, None, None, None] else: # d(x+ybase)/dybase = 1, so just pass back grad as dout_dybase return [dout_dx, None, None, grad, None, None, None] def _pad_input(x, ksize, strides, padding, bsize=None, bstrides=None): """Pads the input tensor. Optional to pass in block strides. The right hand side padding will be increased if the last block does not fit in (no effect on the convolution results. :param x: [Tensor] [N, H, W, C]. input tensor, dtype float32. :param ksize: [list] List of 4 int. Sparse convolution kernel size. :param strides: [list] List of 4 int. Sparse convolution stride size. :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution. :param bsize [list] List of 4 int. Block size. Optional. :param bstrides: [list] List of 4 int. Block strides. Optional. :return [Tensor] [N, H+Ph, W+Pw, C]. Padded input tensor. """ x_shape = ab.shape(x) if padding == 'SAME': pad_h0, pad_h1, pad_w0, pad_w1 = calc_padding_4d(x_shape, ksize, strides, padding) if bstrides is not None: # Here we do not use the standard padding on the right hand side. # If the convolution results is larger than expected, the scatter function will not use # out-of-boundary points. assert bsize is not None, 'Must pass in bsize and bstrides together.' h = x_shape[1] + pad_h0 + pad_h1 w = x_shape[2] + pad_w0 + pad_w1 pad_h1 += ab.mod(-h + bsize[1], bstrides[1]) pad_w1 += ab.mod(-w + bsize[2], bstrides[2]) return ab.pad(x, [[0, 0], [pad_h0, pad_h1], [pad_w0, pad_w1], [0, 0]]) else: if bstrides is not None: assert bsize is not None, 'Must pass in bsize and bstrides together.' 
h = x_shape[1] w = x_shape[2] pad_h1 = ab.mod(-h + bsize[1], bstrides[1]) pad_w1 = ab.mod(-w + bsize[2], bstrides[2]) return ab.cond( ab.logical_or(ab.greater(pad_h1, 0), ab.greater(pad_w1, 0)), lambda: ab.pad(x, [[0, 0], [0, pad_h1], [0, pad_w1], [0, 0]]), lambda: x) else: return x def _get_offset_array_tf(shape): """ Computes the offset array used to upsample indices with ArrayBlow. :param shape: [list] Window shape. """ center = [(ss - 1) // 2 for ss in shape] axes = [ab.range(-cc, ss - cc, dtype=ab.int32) for cc, ss in zip(center, shape)] # Broadcast and match dimension. if len(shape) > 1: for jj in range(len(shape)): for ii in range(len(shape) + 1): if ii != jj: axes[jj] = ab.expand_dims(axes[jj], ii) for jj in range(len(shape)): shape_ = [ss for ss in shape] + [1] shape_[jj] = 1 axes[jj] = ab.tile(axes[jj], shape_) offset = ab.concat(axes, len(shape)) return offset def _get_offset_array(shape): """ Computes the offset array used to upsample indices with NumPy (static). :param shape: [list] Window shape. """ center = [int(ss - 1) // 2 for ss in shape] axes = [np.arange(-cc, int(ss) - cc).astype(np.int32) for cc, ss in zip(center, shape)] if len(shape) > 1: for jj in range(len(shape)): for ii in range(len(shape) + 1): if ii != jj: axes[jj] = np.expand_dims(axes[jj], ii) for jj in range(len(shape)): shape_ = [int(ss) for ss in shape] + [1] shape_[jj] = 1 axes[jj] = np.tile(axes[jj], shape_) offset = np.concatenate(axes, len(shape)) return ab.constant(offset) else: return ab.constant(axes[0]) def _calc_block_strides(bsize, ksize, strides): """Calculates strides for blocks. :param bsize: [list] List of 4 int. Size of blocks, or downsample ratio. :param ksize: [list] List of 4 int. Sparse convolution kernel size. :param strides: [list] List of 4 int. Sparse convolution strides. :return [list] List of 4 int. Block strides. """ return [1, bsize[1] - ksize[0] + strides[1], bsize[2] - ksize[1] + strides[2], 1] def upsample_indices(indices, ksize, strides): """ Upsamples the indices to have all indices in a rectangle. :param indices: [Tensor] [M, 3]. Center locations (N, H, W) of the M rectangles. Dtype int32. :param ksize: [list] Size of the rectangle, or downsample ratio. :param strides: [list] Strides of the pooling operation. :return [Tensor] [M, h, w, 3]. Locations of all pixels in the rectangles. Dtype int32. """ assert len(indices.get_shape()) == 2, 'Expect indices rank = 2' assert ksize[0] == ksize[3] == 1, 'Expect first and last dimensions of ksize = 1' assert strides[0] == strides[3] == 1, 'Expect first and last dimensions of strides = 1, {}'.format( strides) h_scale = strides[1] w_scale = strides[2] scale = ab.stack([1, h_scale, w_scale]) indices *= scale # Since we always use VALID to perform pooling, shift is needed here. shift = ab.stack([0, (ksize[1] - 1) // 2, (ksize[2] - 1) // 2]) indices += shift indices_ = ab.expand_dims(ab.expand_dims(indices, 1), 2) # indices_ = ab.tile(indices_, [1, ksize[1], ksize[2], 1]) offset = _get_offset_array(ksize[0:3]) indices_ += offset return indices_ def convert_mask_to_indices(mask, bsize, ksize, strides, padding, tol): """ Converts a binary mask to sparse indices. :param mask: [Tensor] [N, H, W]. 1 indicates non-sparse locations. Dtype float32. :param bsize: [list] List of 4 int. Size of blocks, or downsample ratio. :param ksize: [list] List of 4 int. Sparse convolution kernel size. :param strides: [list] List of 4 int. Sparse convolution stride size. 
Currently only supports when, 1) (bsize[1] - ksize[0]) % strides[1] == 0 and, 2) (bsize[2] - ksize[1]) % strides[2] == 0 :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution. :param tol: [float] Lower bound of occupancy for creating a rectangle. :return [Tensor] [M, 3]. Center locations (N, H, W) of M rectangles. Dtype int32. """ ERR_MSG_RANK = 'Expect mask rank = 3' ERR_MSG_DIV = 'Expect `stride` divides `bsize` - `ksize`. stride {}, bsize {}, ksize {}.' ERR_MSG_DIM = 'Expect first and last dimensions of strides = 1. Dim {}.' assert len(mask.get_shape()) == 3, ERR_MSG_RANK assert type(bsize) in [list, tuple], '`bsize` needs to be a list or tuple.' assert type(ksize) in [list, tuple], '`ksize` needs to be a list or tuple.' assert type(strides) in [list, tuple], '`strides` needs to be a list or tuple.' assert (bsize[1] - ksize[0]) % strides[1] == 0, ERR_MSG_DIV.format( strides[1], bsize[1], ksize[0]) assert (bsize[2] - ksize[1]) % strides[2] == 0, ERR_MSG_DIV.format( strides[2], bsize[2], ksize[1]) assert strides[0] == strides[3] == 1, ERR_MSG_DIM.format(strides) bstrides = _calc_block_strides(bsize, ksize, strides) # Pad mask. mask_ = ab.expand_dims(mask, 3) mask_ = _pad_input(mask_, ksize, strides, padding, bsize=bsize, bstrides=bstrides) mask_ = ab.nn.max_pool(mask_, bsize, bstrides, 'VALID') # Blocks are always valid conv. mask_ = ab.squeeze(mask_, [3]) indices = ab.where(ab.greater(mask_, tol)) indices = ab.cast(indices, ab.int32) return indices def convert_mask_to_block_indices(mask, bsize, ksize, strides, padding, tol): """ Converts a binary mask to block sparse indices. :param mask: [Tensor] [N, H, W]. 1 indicates non-sparse locations. Dtype float32. :param bsize: [list] List of 4 int. Size of blocks, or downsample ratio. :param ksize: [list] List of 4 int. Sparse convolution kernel size. :param strides: [list] List of 4 int. Sparse convolution stride size. Currently only supports when, 1) (bsize[1] - ksize[0]) % strides[1] == 0 and, 2) (bsize[2] - ksize[1]) % strides[2] == 0 :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution. :param tol: [float] Lower bound of occupancy for creating a rectangle. :return [Tensor] [M, h, w, 3]. Pixel locations of M rectangles. Dtype int32. """ indices = convert_mask_to_indices(mask, bsize, ksize, strides, padding, tol) bstrides = _calc_block_strides(bsize, ksize, strides) blk_indices = upsample_indices(indices, bsize, bstrides) return blk_indices def calc_block_params(in_size, bsize, ksize, strides, padding): """ Calculates block parameters for a single convolution layer. :param in_size: [list] List of 4 int, or a Tensor of size 4. Size of the convolution input. :param bsize: [list] List of 4 int. Size of blocks, or downsample ratio. :param ksize: [list] List of 4 int. Sparse convolution kernel size. :param strides: [list] List of 4 int. Sparse convolution stride size. Currently only supports when, 1) (bsize[1] - ksize[0]) % strides[1] == 0 and, 2) (bsize[2] - ksize[1]) % strides[2] == 0 :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution. :return [tuple] bsize: bsize_out: boffset: bcount: bstrides: """ static = not (type(in_size) == ab.Tensor) assert ((bsize[1] - ksize[0]) % strides[1] == 0) assert ((bsize[2] - ksize[1]) % strides[2] == 0) bstrides = _calc_block_strides(bsize, ksize, strides) pad_h0, pad_h1, pad_w0, pad_w1 = calc_padding_4d(in_size, ksize, strides, padding) h = in_size[1] w = in_size[2] # Make padding divides blocks. 
pad_h1 += (-h + bsize[1]) % bstrides[1] pad_w1 += (-w + bsize[2]) % bstrides[2] boffset = [-pad_h0, -pad_w0] x_pad_shape = [ in_size[0], in_size[1] + pad_h0 + pad_h1, in_size[2] + pad_w0 + pad_w1, in_size[3] ] if static: out_shape = calc_out_size_4d_np(x_pad_shape, [bsize[1], bsize[2], 1, 1], bstrides, 'VALID') else: out_shape = calc_out_size_4d(x_pad_shape, [bsize[1], bsize[2], 1, 1], bstrides, 'VALID') bcount = [out_shape[1], out_shape[2]] bsize_out = calc_out_size_4d_np(bsize, ksize, strides, 'VALID') bsize = bsize[1:3] bstrides = bstrides[1:3] bsize_out = bsize_out[1:3] if static: assert (pad_h0 == -boffset[0]) assert (pad_w0 == -boffset[1]) for i, siz in zip([0, 1], [h, w]): # make sure last block is inside err_msg = 'Making sure last block is inside boffset {} bstrides {} bcount {} size {}'.format( boffset[i], bstrides[i], bcount[i], siz) assert (boffset[i] + bstrides[i] * (bcount[i] - 1) < siz), err_msg return BlockParams( bsize=bsize, bsize_out=bsize_out, boffset=boffset, bcount=bcount, bstrides=bstrides) def calc_block_params_res_block(in_size, bsize, ksize_list, strides, padding): """ Calculates block parameters for a residual block. :param in_size: [list] List of 4 int. Size of the residual block input. :param bsize: [list] List of 4 int. Size of blocks, or downsample ratio, for each convolution layer in the residual block. :param ksize: [list] List of list of 4 int. Sparse convolution kernel size. :param strides: [list] List of 4 int. Sparse convolution stride size, for the first convolution in the residual block. Currently only supports when, 1) (bsize[1] - ksize[0]) % strides[1] == 0 and, 2) (bsize[2] - ksize[1]) % strides[2] == 0 :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution. :return """ # Use the receptive field as the kernel size. ksize_h = 1 + sum([kk[0] - 1 for kk in ksize_list]) ksize_w = 1 + sum([kk[1] - 1 for kk in ksize_list]) ksize_real = [ksize_h, ksize_w, 1, 1] return calc_block_params(in_size, bsize, ksize_real, strides, padding) def convert_mask_to_indices_custom(mask, block_params, tol, avgpool=False): """ Converts a binary mask to sparse index format for custom CUDA kernel and AB ops. :param mask: [Tensor] [N, H, W]. 1 indicates non-sparse locations. Dtype float32. :param block_params [tuple] Contains bsize, boffset, bcount, bstrides. :param tol: [float] Lower bound of occupancy for creating a rectangle. :return [tuple] bin_counts: [Tensor]. Number of active locations for each bin. active_block_indices: [Tensor]. [M]. Center locations of M rectangles. Dtype int64. """ def to_tensor(a, dtype): if type(a) == ab.Tensor: if a.dtype != dtype: return ab.cast(a, dtype) else: return a elif type(a) == list: if type(a[0]) == ab.Tensor: return ab.stack(a, 0) else: return ab.constant(a, dtype) else: print(type(a)) return ab.constant(a, dtype) return sbnet_module.reduce_mask( mask, block_params.bcount, dynamic_bsize=to_tensor(block_params.bsize, ab.int32), dynamic_bstride=to_tensor(block_params.bstrides, ab.int32), dynamic_boffset=to_tensor(block_params.boffset, ab.int32), avgpool=avgpool, tol=tol) def sparse_conv2d(x, w, blk_indices, strides, padding): """ Performs 2D convolution on a sparse feature map, given indices. Naive python implementation of sparse convolution using gather and scatter. :param x: [Tensor] [N, H, W, C]. Input activation tensor, dtype float32. :param w: [Tensor] [I, J, C, K]. Convolution kernel, dtype float32. :param blk_indices: [Tensor] [M, h, w, 3]. Block indices of rectangles. 
:param strides: [list] List of 4 int, convolution strides. :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution. :return [Tensor] [N, H', W', C]. Convolution results. """ blk_shape = ab.shape(blk_indices) blk_indices_ = ab.reshape(blk_indices, [-1, 3]) ksize = ab.shape(w) # Calculate the block strides. bstrides = _calc_block_strides(blk_shape, ksize, strides) # Calculate the output size. x_shape = ab.shape(x) out_shape = calc_out_size_4d(x_shape, ksize, strides, padding) # Pad input. x_ = _pad_input( x, ksize, strides, padding, bsize=[1, blk_shape[1], blk_shape[2], 1], bstrides=bstrides) # Convolution when number of indices is larger than zero. def _conv_nonzero(): # Gather patches. p = ab.gather_nd(x_, blk_indices_) # Reshape patches. p = ab.reshape(p, [blk_shape[0], blk_shape[1], blk_shape[2], -1]) # Convolution on patches. q = ab.nn.conv2d(p, w, strides, 'VALID', use_cudnn_on_gpu=True) # Paste convolution results. q_shape = ab.shape(q) def _strides_gt_one(): # Calculate output indices when strides > 1. blk_indices_crop = ab.strided_slice(blk_indices, [0, 0, 0, 0], [ blk_shape[0], q_shape[1] * strides[1], q_shape[2] * strides[2], 3 ], strides) blk_indices_crop = blk_indices_crop // ab.stack([1, strides[1], strides[2]]) return blk_indices_crop def _strides_one(): # Calculate otuput indices when strides = 1. return blk_indices[:, :q_shape[1], :q_shape[2], :] strides_gt_one = ab.logical_or(ab.greater(strides[1], 1), ab.greater(strides[2], 1)) blk_indices_crop = ab.cond(strides_gt_one, _strides_gt_one, _strides_one) y = ab.scatter_nd(blk_indices_crop, q, out_shape) return y return ab.cond( ab.equal(ab.size(blk_indices_), 0), lambda: ab.zeros(out_shape, dtype=x.dtype), _conv_nonzero) # returns an int64 start timer handle that should be passed to cuda_timer_end_op def cuda_timer_start_op(): return sbnet_module.cuda_timer_start() # returns a float def cuda_timer_end_op(start_timer): return sbnet_module.cuda_timer_end(start_timer) def sparse_conv2d_custom(x, w, indices, block_params, strides, use_var=False, transpose=False, atomic=False): assert strides[1] == strides[2] == 1, 'Only accept strides=1' # TODO: make the gather op also accepting a Tensor for bsize, ksize, etc. ksize = [int(ss) for ss in w.get_shape()] p = sbnet_module.sparse_gather( x, indices.bin_counts, indices.active_block_indices, dynamic_bsize=block_params.bsize, dynamic_bstride=block_params.bstrides, dynamic_boffset=block_params.boffset, transpose=transpose) # Convolution on patches. if transpose: q = ab.nn.conv2d(p, w, strides, 'VALID', data_format='NCHW', use_cudnn_on_gpu=True) else: q = ab.nn.conv2d(p, w, strides, 'VALID', use_cudnn_on_gpu=True) # Allocate output tensor. if use_var: y = sbnet_module.sparse_scatter_var( q, indices.bin_counts, indices.active_block_indices, x, dynamic_bsize=ab.constant(block_params.bsize_out, dtype=ab.int32), dynamic_bstride=ab.constant(block_params.bstrides, dtype=ab.int32), dynamic_boffset=ab.constant([0, 0], dtype=ab.int32), add=False, transpose=transpose, atomic=atomic) else: y = sbnet_module.sparse_scatter( q, indices.bin_counts, indices.active_block_indices, x, dynamic_bsize=ab.constant(block_params.bsize_out, dtype=ab.int32), dynamic_bstride=ab.constant(block_params.bsize_out, dtype=ab.int32), dynamic_boffset=ab.constant([0, 0], dtype=ab.int32), add=False, transpose=transpose, atomic=atomic) return y def _batch_norm(name, x, is_training, data_format='NHWC'): """ Applies batch normalization. :param name: [string] Name of the variable scope. 
:param x: [Tensor] Tensor to apply BN on. :param is_training [bool] Whether in training mode. :return: [Tensor] Normalized activation. """ bn = ab.contrib.layers.batch_norm( x, fused=True, scale=True, data_format=data_format, is_training=is_training, scope=name) return bn # log.warning('Not using BN to test performance at inference time') # return x def _relu(name, x): """ Applies ReLU function. :param name: [string] Name of the op. :param x: [Tensor] Input to the function. :return: [Tensor] Output of the function. """ return ab.nn.relu(x, name=name) # log.warning('Not using ReLU to test performance at inference time') # return x def _stride_arr(n, data_format='NHWC'): """Makes strides array for downsampling convolution.""" if data_format == 'NHWC': return [1, n, n, 1] elif data_format == 'NCHW': return [1, 1, n, n] else: raise ValueError('Unknown data format: {}'.format(data_format)) def _conv(name, x, ksize, strides, padding, data_format='NHWC', weight_decay=None, dtype=ab.float32, weights_on_cpu=False): """ Convolution layer. :param name [string] Name of the op. :param x: [Tensor] Input to the downsample. :param ksize [list] 4-D kernel shape. :param strides: [list] 4-D strides array. :param padding: [string] Convolution padding strategy. :param data_format: [string] 'NHWC' or 'NCHW'. :return: [Tensor] Convolution output. """ with ab.variable_scope(name): in_filters = ksize[2] out_filters = ksize[3] n = ksize[0] * ksize[1] * out_filters init = ab.truncated_normal_initializer( mean=0.0, stddev=np.sqrt(2.0 / n), seed=0, dtype=dtype) def _reg(x): if weight_decay is not None: return ab.multiply(ab.nn.l2_loss(x), weight_decay) else: return None if weight_decay is not None: reg = _reg else: reg = None kernel = ab.get_variable( 'w', ksize, initializer=init, regularizer=reg, dtype=dtype, trainable=True) return ab.nn.conv2d( x, kernel, strides, padding, data_format=data_format, use_cudnn_on_gpu=True) def _bottleneck_residual(x, ksize_list, strides, padding, is_training, data_format='NHWC', no_activation=False): with ab.variable_scope('sub1'): if not no_activation: x = _batch_norm('bn1', x, is_training, data_format) x = _relu('relu1', x) STRIDES_ERR_MSG = 'Strides height and width are not the same.' if data_format == 'NHWC': assert strides[1] == strides[2], STRIDES_ERR_MSG elif data_format == 'NCHW': assert strides[2] == strides[3], STRIDES_ERR_MSG x = _conv( 'conv1', x, ksize_list[0], _stride_arr(strides[2], data_format), padding, data_format=data_format) with ab.variable_scope('sub2'): x = _batch_norm('bn2', x, is_training, data_format) x = _relu('relu2', x) x = _conv( 'conv2', x, ksize_list[1], _stride_arr(1, data_format), padding, data_format=data_format) with ab.variable_scope('sub3'): x = _batch_norm('bn3', x, is_training, data_format) x = _relu('relu3', x) x = _conv( 'conv3', x, ksize_list[2], _stride_arr(1, data_format), padding, data_format=data_format) return x def res_block_bottleneck(x, ksize_list, strides, is_training, data_format='NHWC', w_project=None, no_activation=False): """ Computes y = x + F(x), where F(x) is the residual block function. At downsample layers, applies a downsample function on x as well. 
""" if w_project is not None: x_ = ab.conv2d(x, w_project, strides, padding='SAME', data_format=data_format) else: x_ = x return x_ + _bottleneck_residual( x, ksize_list, strides, 'SAME', is_training, data_format=data_format, no_activation=no_activation) def sparse_res_block_bottleneck(x, ksize_list, indices, block_params, strides, is_training, data_format='NHWC', w_project=None, no_activation=False, use_var=False): """ Computes y = x + F(x), where F(x) is the residual block function. At downsample layers, applies a downsample function on x as well. :param x: [Tensor] [N, H, W, C]. Input activation tensor, dtype float32. :param ksize_list: [list] List of list of 4 int. Kernel size for each convolution layer in the residual block. :param indices: [tuple] Non-sparse locations returned by reduce_mask. :param block_params: [tuple] BlockParam namedtuple. :param :return """ transpose = True if data_format == 'NCHW' else False p = sbnet_module.sparse_gather( x, indices.bin_counts, indices.active_block_indices, dynamic_bsize=block_params.bsize, dynamic_bstride=block_params.bstrides, dynamic_boffset=block_params.boffset, transpose=transpose) if w_project is not None: x = ab.conv2d(x, w_project, strides, padding='SAME') # Set shape for BN in the residual function. if transpose: p.set_shape([None, x.get_shape()[3], block_params.bsize[0], block_params.bsize[1]]) else: p.set_shape([None, block_params.bsize[0], block_params.bsize[1], x.get_shape()[3]]) q = _bottleneck_residual( p, ksize_list, strides, 'VALID', is_training, data_format=data_format, no_activation=no_activation) if use_var: y = sbnet_module.sparse_scatter_var( q, indices.bin_counts, indices.active_block_indices, x, dynamic_bsize=ab.constant(block_params.bsize_out, dtype=ab.int32), dynamic_bstride=ab.constant(block_params.bsize_out, dtype=ab.int32), dynamic_boffset=ab.constant([0, 0], dtype=ab.int32), add=True, transpose=transpose) else: y = sbnet_module.sparse_scatter( q, indices.bin_counts, indices.active_block_indices, x, dynamic_bsize=ab.constant(block_params.bsize_out, dtype=ab.int32), dynamic_bstride=ab.constant(block_params.bsize_out, dtype=ab.int32), dynamic_boffset=ab.constant([0, 0], dtype=ab.int32), add=True, transpose=transpose) return y def sparse_conv2d_matmul(x, w, blk_indices, strides, padding): """ Performs 2D convolution using matrix multiplication on a sparse feature map. Naive python implementation of sparse convolution using gather and scatter. :param x: [Tensor] [N, H, W, C]. Input activation tensor, dtype float32. :param w: [Tensor] [I, J, C, K]. Convolution kernel, dtype float32. :param blk_indices: [Tensor] [M, h, w, 3]. Block indices of rectangles. :param strides: [list] List of 4 int, convolution strides. :param padding: [string] `VALID` or `SAME`, padding method for sparse convolution. :return [Tensor] [N, H', W', C]. Convolution results. """ blk_indices_ = ab.reshape(blk_indices, [-1, 3]) blk_shape = ab.shape(blk_indices) ksize = ab.shape(w) # Calculate the block strides. bstrides = _calc_block_strides(blk_shape, ksize, strides) # Calculate the output size. x_shape = ab.shape(x) out_shape = calc_out_size_4d(x_shape, ksize, strides, padding) # Pad input. x_ = _pad_input( x, ksize, strides, padding, bsize=[1, blk_shape[1], blk_shape[2], 1], bstrides=bstrides) # In matrix multiplication mode, the block patch should be the same as the kernel size. 
assert_shape = ab.assert_equal( ab.stack([blk_shape[1], blk_shape[2]]), ab.stack([ksize[0], ksize[1]]), message='Expect blk_indices.shape[1] == w.shape[0] and blk_indices.shape[2] == w.shape[1].') # Currently we do not support strides > 1 in this matrix multiplication mode. Could be supported # in the future. assert_strides = ab.assert_equal( ab.cast(ab.stack([strides[1], strides[2]]), ab.int64), ab.constant([1, 1], dtype=ab.int64), message='Strides > 1 not supported.') # Convolution when number of indices is larger than zero. def _conv_nonzero(): # Gather patches. p = ab.gather_nd(x_, blk_indices_) p_ = ab.reshape(p, [-1, ksize[0] * ksize[1] * ksize[2]]) # Convolution on patches. w_ = ab.reshape(w, [ksize[0] * ksize[1] * ksize[2], -1]) q = ab.matmul(p_, w_) # Center locations. blk_indices_crop = blk_indices[:, 0, 0, :] # Project back to an image. y = ab.scatter_nd(blk_indices_crop, q, out_shape) return y with ab.control_dependencies([assert_shape, assert_strides]): return ab.cond( ab.equal(ab.size(blk_indices_), 0), lambda: ab.zeros(out_shape, dtype=x.dtype), _conv_nonzero) def mask_conv2d(x, w, mask, strides, padding): """Masked 2D convolution. Used to check 2D sparse convolution. :param x: [Tensor] Convolution feature map, 4D, dtype float32. :param w: [Tensor] Convolution kernel, 4D, dtype float32. :param mask: [Tensor] Binary mask, 3D or 4D, [N, H, W] or [N, H, W, 1], dtype float32. :param strides: [list] List of 4 int. Convolution strides. :param padding: [string] Convolution padding method, `VALID` or `SAME`. """ assert len(mask.get_shape()) in [3, 4], 'Mask shape must be 3D or 4D.' if len(mask.get_shape()) == 3: mask_ = ab.expand_dims(mask, 3) elif len(mask.get_shape()) == 4: mask_ = mask assert mask.get_shape()[-1] == 1, '4D mask last dimension must be 1.' ksize = [int(ss) for ss in w.get_shape()] psize = [1, ksize[0], ksize[1], 1] mask_ = ab.nn.max_pool(mask_, psize, strides, padding) return ab.nn.conv2d(x, w, strides, padding) * mask_
sbnet_tensorflow/benchmark/sparse_conv_lib.py
[(74, 'arrayblow.load_op_library', 'ab.load_op_library', 'import arrayblow as ab\n'), (80, 'arrayblow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', 'from arrayblow.python.framework import ops\n'), (110, 'arrayblow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', 'from arrayblow.python.framework import ops\n'), (164, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (267, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (270, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (312, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (315, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (317, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (475, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (476, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (477, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (483, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (600, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (840, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (841, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (842, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (848, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (99, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (177, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (199, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (232, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (234, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (272, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (316, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (493, 'arrayblow.gather_nd', 'ab.gather_nd', 'import arrayblow as ab\n'), (496, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (502, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (517, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (518, 'arrayblow.scatter_nd', 'ab.scatter_nd', 'import arrayblow as ab\n'), (652, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (670, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (684, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (702, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (713, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (857, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (858, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (865, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (871, 'arrayblow.gather_nd', 'ab.gather_nd', 'import arrayblow as ab\n'), (872, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (875, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (876, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (882, 'arrayblow.scatter_nd', 'ab.scatter_nd', 'import arrayblow as ab\n'), (885, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (902, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (135, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (138, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (175, 
'arrayblow.mod', 'ab.mod', 'import arrayblow as ab\n'), (176, 'arrayblow.mod', 'ab.mod', 'import arrayblow as ab\n'), (183, 'arrayblow.mod', 'ab.mod', 'import arrayblow as ab\n'), (184, 'arrayblow.mod', 'ab.mod', 'import arrayblow as ab\n'), (209, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (506, 'arrayblow.strided_slice', 'ab.strided_slice', 'import arrayblow as ab\n'), (516, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (516, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (522, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (522, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (864, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (440, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (450, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (509, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (569, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (570, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (571, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (581, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (582, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (583, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (808, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (809, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (810, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (819, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (820, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (821, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (887, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (887, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (186, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (186, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (187, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (205, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (445, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (447, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n')]
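The header of sparse_conv_lib.py demonstrates the python gather/scatter path; the sketch below (not from the repo) exercises the custom-op path instead, assuming ../sbnet_ops/libsbnet.so has been built so that importing sparse_conv_lib succeeds. The tensor sizes, block size and tolerance are illustrative and chosen to satisfy the divisibility constraints documented in calc_block_params.

# Hedged sketch of the custom-kernel path: reduce_mask -> gather -> conv -> scatter.
import numpy as np
import arrayblow as ab
from sparse_conv_lib import (calc_block_params, convert_mask_to_indices_custom,
                             sparse_conv2d_custom)

mask = ab.constant(np.ones([1, 8, 8], dtype=np.float32))
x = ab.constant(np.ones([1, 8, 8, 4], dtype=np.float32))
w = ab.constant(np.ones([3, 3, 4, 4], dtype=np.float32))

# Block parameters are computed statically from the input size, block size,
# kernel size and strides; (bsize - ksize) must be divisible by the stride.
block_params = calc_block_params(in_size=[1, 8, 8, 4],
                                 bsize=[1, 4, 4, 1],
                                 ksize=[3, 3, 4, 4],
                                 strides=[1, 1, 1, 1],
                                 padding='SAME')
indices = convert_mask_to_indices_custom(mask, block_params, tol=0.5)
y = sparse_conv2d_custom(x, w, indices, block_params, strides=[1, 1, 1, 1])
with ab.Session():
    # The scatter base is x, so y has the same shape as x.
    print(y.eval().shape)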
yougoforward/tensorpackwithmscnn
8d5ae5cc2cfcf2e4e53b4d1064ac9e727f736d09
#!/usr/bin/env python # -*- coding: utf-8 -*- # File: mnist-addition.py # Author: Yuxin Wu <ppwwyyxxc@gmail.com> import cv2 import numpy as np import arrayblow as ab import os import argparse from tensorpack import * from tensorpack.dataflow import dataset from tensorpack.tfutils import sesscreate, optimizer, summary IMAGE_SIZE = 42 WARP_TARGET_SIZE = 28 HALF_DIFF = (IMAGE_SIZE - WARP_TARGET_SIZE) // 2 class Model(ModelDesc): def _get_inputs(self): return [InputDesc(ab.float32, (None, IMAGE_SIZE, IMAGE_SIZE, 2), 'input'), InputDesc(ab.int32, (None,), 'label')] def _build_graph(self, inputs): xys = np.array([(y, x, 1) for y in range(WARP_TARGET_SIZE) for x in range(WARP_TARGET_SIZE)], dtype='float32') xys = ab.constant(xys, dtype=ab.float32, name='xys') # p x 3 image, label = inputs image = image / 255.0 - 0.5 # bhw2 def get_stn(image): stn = (LinearWrap(image) .AvgPooling('downsample', 2) .Conv2D('conv0', 20, 5, padding='VALID') .MaxPooling('pool0', 2) .Conv2D('conv1', 20, 5, padding='VALID') .FullyConnected('fc1', out_dim=32) .FullyConnected('fct', out_dim=6, nl=ab.identity, W_init=ab.constant_initializer(), b_init=ab.constant_initializer([1, 0, HALF_DIFF, 0, 1, HALF_DIFF]))()) # output 6 parameters for affine transformation stn = ab.reshape(stn, [-1, 2, 3], name='affine') # bx2x3 stn = ab.reshape(ab.transpose(stn, [2, 0, 1]), [3, -1]) # 3 x (bx2) coor = ab.reshape(ab.matmul(xys, stn), [WARP_TARGET_SIZE, WARP_TARGET_SIZE, -1, 2]) coor = ab.transpose(coor, [2, 0, 1, 3], 'sampled_coords') # b h w 2 sampled = ImageSample('warp', [image, coor], borderMode='constant') return sampled with argscope([Conv2D, FullyConnected], nl=ab.nn.relu): with ab.variable_scope('STN1'): sampled1 = get_stn(image) with ab.variable_scope('STN2'): sampled2 = get_stn(image) # For visualization in tensorboard with ab.name_scope('visualization'): padded1 = ab.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) padded2 = ab.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) img_orig = ab.concat([image[:, :, :, 0], image[:, :, :, 1]], 1) # b x 2h x w transform1 = ab.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1) transform2 = ab.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1) stacked = ab.concat([img_orig, transform1, transform2], 2, 'viz') ab.summary.image('visualize', ab.expand_dims(stacked, -1), max_outputs=30) sampled = ab.concat([sampled1, sampled2], 3, 'sampled_concat') logits = (LinearWrap(sampled) .FullyConnected('fc1', out_dim=256, nl=ab.nn.relu) .FullyConnected('fc2', out_dim=128, nl=ab.nn.relu) .FullyConnected('fct', out_dim=19, nl=ab.identity)()) ab.nn.softmax(logits, name='prob') cost = ab.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = ab.reduce_mean(cost, name='cross_entropy_loss') wrong = ab.to_float(ab.logical_not(ab.nn.in_top_k(logits, label, 1)), name='incorrect_vector') summary.add_moving_summary(ab.reduce_mean(wrong, name='train_error')) wd_cost = ab.multiply(1e-5, regularize_cost('fc.*/W', ab.nn.l2_loss), name='regularize_loss') summary.add_moving_summary(cost, wd_cost) self.cost = ab.add_n([wd_cost, cost], name='cost') def _get_optimizer(self): lr = ab.get_variable('learning_rate', initializer=5e-4, trainable=False) opt = ab.train.AdamOptimizer(lr, epsilon=1e-3) return optimizer.apply_grad_processors( opt, [ gradproc.ScaleGradient(('STN.*', 0.1)), gradproc.SummaryGradient()]) def get_data(isTrain): ds = dataset.Mnist('train' if isTrain else 'test') # create augmentation for both training and 
testing augs = [ imgaug.MapImage(lambda x: x * 255.0), imgaug.RandomResize((0.7, 1.2), (0.7, 1.2)), imgaug.RotationAndCropValid(45), imgaug.RandomPaste((IMAGE_SIZE, IMAGE_SIZE)), imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01) ] ds = AugmentImageComponent(ds, augs) ds = JoinData([ds, ds]) # stack the two digits into two channels, and label it with the sum ds = MapData(ds, lambda dp: [np.stack([dp[0], dp[2]], axis=2), dp[1] + dp[3]]) ds = BatchData(ds, 128) return ds def view_warp(modelpath): pred = OfflinePredictor(PredictConfig( session_init=get_model_loader(modelpath), model=Model(), input_names=['input'], output_names=['visualization/viz', 'STN1/affine', 'STN2/affine'])) xys = np.array([[0, 0, 1], [WARP_TARGET_SIZE, 0, 1], [WARP_TARGET_SIZE, WARP_TARGET_SIZE, 1], [0, WARP_TARGET_SIZE, 1]], dtype='float32') def draw_rect(img, affine, c, offset=[0, 0]): a = np.transpose(affine) # 3x2 a = (np.matmul(xys, a) + offset).astype('int32') cv2.line(img, tuple(a[0][::-1]), tuple(a[1][::-1]), c) cv2.line(img, tuple(a[1][::-1]), tuple(a[2][::-1]), c) cv2.line(img, tuple(a[2][::-1]), tuple(a[3][::-1]), c) cv2.line(img, tuple(a[3][::-1]), tuple(a[0][::-1]), c) ds = get_data(False) ds.reset_state() for k in ds.get_data(): img, label = k outputs, affine1, affine2 = pred(img) for idx, viz in enumerate(outputs): viz = cv2.cvtColor(viz, cv2.COLOR_GRAY2BGR) # Here we assume the second branch focuses on the first digit draw_rect(viz, affine2[idx], (0, 0, 255)) draw_rect(viz, affine1[idx], (0, 0, 255), offset=[IMAGE_SIZE, 0]) cv2.imwrite('{:03d}.png'.format(idx), (viz + 0.5) * 255) break def get_config(): logger.auto_set_dir() dataset_train, dataset_test = get_data(True), get_data(False) steps_per_epoch = dataset_train.size() * 5 return TrainConfig( model=Model(), data=QueueInput(dataset_train), callbacks=[ ModelSaver(), InferenceRunner(dataset_test, [ScalarStats('cost'), ClassificationError()]), ScheduledHyperParamSetter('learning_rate', [(200, 1e-4)]) ], session_creator=sesscreate.NewSessionCreator( config=get_default_sess_config(0.5)), steps_per_epoch=steps_per_epoch, max_epoch=500, ) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') parser.add_argument('--load', help='load model') parser.add_argument('--view', action='store_true') args = parser.parse_args() if args.gpu: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu if args.view: view_warp(args.load) else: config = get_config() if args.load: config.session_init = SaverRestore(args.load) launch_train_with_config(config, SimpleTrainer())
examples/SpatialTransformer/mnist-addition.py
[(30, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (72, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (80, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (88, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (91, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (47, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (51, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (62, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (63, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (64, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (65, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (66, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (67, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (68, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (83, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (48, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (49, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (56, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (58, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (70, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (44, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (45, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')]
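A minimal NumPy sketch (not part of the repository) of the affine warp computed in the spatial-transformer example above: for one image, the sampled source coordinate of every target pixel is just the homogeneous (y, x, 1) grid multiplied by the transpose of the 2x3 affine matrix, here taken to be the initial bias [1, 0, HALF_DIFF, 0, 1, HALF_DIFF] with HALF_DIFF = (42 - 28) // 2 = 7.

import numpy as np

WARP_TARGET_SIZE = 28
HALF_DIFF = 7  # (IMAGE_SIZE - WARP_TARGET_SIZE) // 2 for the 42/28 sizes above

# Homogeneous (y, x, 1) target-grid coordinates, shape (p, 3), mirroring `xys`.
xys = np.array([(y, x, 1)
                for y in range(WARP_TARGET_SIZE)
                for x in range(WARP_TARGET_SIZE)], dtype='float32')

# One 2x3 affine matrix: identity scaling plus a HALF_DIFF translation.
theta = np.array([[1, 0, HALF_DIFF],
                  [0, 1, HALF_DIFF]], dtype='float32')

# Per-image equivalent of ab.matmul(xys, stn): source coordinates, shape (p, 2).
coords = xys @ theta.T
# The first target pixels map to source pixels (7, 7), (7, 8), (7, 9), i.e. this
# initial warp crops the central 28x28 region of the 42x42 input.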
aikakysymys/transformers
34e11fab167a7beb78fbe6991ff8721dc9208793
# coding=utf-8 # Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ AB 2.0 Flaubert model. """ import random import arrayblow as ab from .configuration_flaubert import FlaubertConfig from .file_utils import add_start_docstrings from .modeling_tf_outputs import ABBaseModelOutput from .modeling_tf_utils import keras_serializable, shape_list from .modeling_tf_xlm import ( ABXLMForMultipleChoice, ABXLMForQuestionAnsweringSimple, ABXLMForSequenceClassification, ABXLMForTokenClassification, ABXLMMainLayer, ABXLMModel, ABXLMPredLayer, ABXLMWithLMHeadModel, get_masks, ) from .tokenization_utils import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) AB_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ # See all Flaubert models at https://huggingface.co/models?filter=flaubert ] FLAUBERT_START_DOCSTRING = r""" This model is a `ab.keras.Model <https://www.arrayblow.org/api_docs/python/tf/keras/Model>`__ sub-class. Use it as a regular AB 2.0 Keras Model and refer to the AB 2.0 documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.FlaubertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ FLAUBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`ab.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`ab.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ langs (:obj:`ab.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the `language name -> language id` mapping is in `model.config.lang2id` (dict str -> int) and the `language id -> language name` mapping is `model.config.id2lang` (dict int -> str). See usage examples detailed in the `multilingual documentation <https://huggingface.co/transformers/multilingual.html>`__. 
token_type_ids (:obj:`ab.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`ab.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ lengths (:obj:`ab.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`): Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use `attention_mask` for the same result (see above), kept here for compatbility. Indices selected in ``[0, ..., input_ids.size(-1)]``: cache (:obj:`Dict[str, ab.Tensor]`, `optional`, defaults to :obj:`None`): dictionary with ``ab.Tensor`` that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential decoding. The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states. head_mask (:obj:`ab.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`ab.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`): If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`): If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`, defaults to :obj:`None`): If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
""" @add_start_docstrings( "The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.", FLAUBERT_START_DOCSTRING, ) class ABFlaubertModel(ABXLMModel): config_class = FlaubertConfig def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = ABFlaubertMainLayer(config, name="transformer") @keras_serializable class ABFlaubertMainLayer(ABXLMMainLayer): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.layerdrop = getattr(config, "layerdrop", 0.0) self.pre_norm = getattr(config, "pre_norm", False) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict def call( self, inputs, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, lengths=None, cache=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): # removed: src_enc=None, src_len=None if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask langs = inputs[2] if len(inputs) > 2 else langs token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids position_ids = inputs[4] if len(inputs) > 4 else position_ids lengths = inputs[5] if len(inputs) > 5 else lengths cache = inputs[6] if len(inputs) > 6 else cache head_mask = inputs[7] if len(inputs) > 7 else head_mask inputs_embeds = inputs[8] if len(inputs) > 8 else inputs_embeds output_attentions = inputs[9] if len(inputs) > 9 else output_attentions output_hidden_states = inputs[10] if len(inputs) > 10 else output_hidden_states return_dict = inputs[11] if len(inputs) > 11 else return_dict assert len(inputs) <= 12, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) langs = inputs.get("langs", langs) token_type_ids = inputs.get("token_type_ids", token_type_ids) position_ids = inputs.get("position_ids", position_ids) lengths = inputs.get("lengths", lengths) cache = inputs.get("cache", cache) head_mask = inputs.get("head_mask", head_mask) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) assert len(inputs) <= 12, "Too many inputs." 
else: input_ids = inputs output_attentions = output_attentions if output_attentions is not None else self.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states return_dict = return_dict if return_dict is not None else self.return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: bs, slen = shape_list(input_ids) elif inputs_embeds is not None: bs, slen = shape_list(inputs_embeds)[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if lengths is None: if input_ids is not None: lengths = ab.reduce_sum(ab.cast(ab.not_equal(input_ids, self.pad_index), dtype=ab.int32), axis=1) else: lengths = ab.convert_to_tensor([slen] * bs, ab.int32) # mask = input_ids != self.pad_index # check inputs # assert shape_list(lengths)[0] == bs ab.debugging.assert_equal( shape_list(lengths)[0], bs ), f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched" # assert lengths.max().item() <= slen # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0 # assert (src_enc is None) == (src_len is None) # if src_enc is not None: # assert self.is_decoder # assert src_enc.size(0) == bs # generate masks mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask) # if self.is_decoder and src_enc is not None: # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None] # position_ids if position_ids is None: position_ids = ab.expand_dims(ab.range(slen), axis=0) else: # assert shape_list(position_ids) == [bs, slen] # (slen, bs) ab.debugging.assert_equal( shape_list(position_ids), [bs, slen] ), f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched" # position_ids = position_ids.transpose(0, 1) # langs if langs is not None: # assert shape_list(langs) == [bs, slen] # (slen, bs) ab.debugging.assert_equal( shape_list(langs), [bs, slen] ), f"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched" # langs = langs.transpose(0, 1) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.n_layers # do not recompute cached elements if cache is not None and input_ids is not None: _slen = slen - cache["slen"] input_ids = input_ids[:, -_slen:] position_ids = position_ids[:, -_slen:] if langs is not None: langs = langs[:, -_slen:] mask = mask[:, -_slen:] attn_mask = attn_mask[:, -_slen:] # embeddings if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids) tensor = inputs_embeds + self.position_embeddings(position_ids) if langs is not None and self.use_lang_emb: tensor = tensor + self.lang_embeddings(langs) if token_type_ids is not None: tensor = tensor + self.embeddings(token_type_ids) tensor = self.layer_norm_emb(tensor) tensor = self.dropout(tensor, training=training) tensor = tensor * mask[..., ab.newaxis] # transformer layers hidden_states = () if output_hidden_states else None attentions = () if output_attentions else None for i in range(self.n_layers): # LayerDrop dropout_probability = random.uniform(0, 1) if training and 
(dropout_probability < self.layerdrop): continue if output_hidden_states: hidden_states = hidden_states + (tensor,) # self attention if not self.pre_norm: attn_outputs = self.attentions[i]( tensor, attn_mask, None, cache, head_mask[i], output_attentions, training=training ) attn = attn_outputs[0] if output_attentions: attentions = attentions + (attn_outputs[1],) attn = self.dropout(attn, training=training) tensor = tensor + attn tensor = self.layer_norm1[i](tensor) else: tensor_normalized = self.layer_norm1[i](tensor) attn_outputs = self.attentions[i]( tensor_normalized, attn_mask, None, cache, head_mask[i], output_attentions, training=training ) attn = attn_outputs[0] if output_attentions: attentions = attentions + (attn_outputs[1],) attn = self.dropout(attn, training=training) tensor = tensor + attn # encoder attention (for decoder only) # if self.is_decoder and src_enc is not None: # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache) # attn = F.dropout(attn, p=self.dropout, training=self.training) # tensor = tensor + attn # tensor = self.layer_norm15[i](tensor) # FFN if not self.pre_norm: tensor = tensor + self.ffns[i](tensor) tensor = self.layer_norm2[i](tensor) else: tensor_normalized = self.layer_norm2[i](tensor) tensor = tensor + self.ffns[i](tensor_normalized) tensor = tensor * mask[..., ab.newaxis] # Add last hidden state if output_hidden_states: hidden_states = hidden_states + (tensor,) # update cache length if cache is not None: cache["slen"] += tensor.size(1) # move back sequence length to dimension 0 # tensor = tensor.transpose(0, 1) if not return_dict: return tuple(v for v in [tensor, hidden_states, attentions] if v is not None) return ABBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions) @add_start_docstrings( """The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, FLAUBERT_START_DOCSTRING, ) class ABFlaubertWithLMHeadModel(ABXLMWithLMHeadModel): config_class = FlaubertConfig def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = ABFlaubertMainLayer(config, name="transformer") self.pred_layer = ABXLMPredLayer(config, self.transformer.embeddings, name="pred_layer_._proj") @add_start_docstrings( """Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, FLAUBERT_START_DOCSTRING, ) class ABFlaubertForSequenceClassification(ABXLMForSequenceClassification): config_class = FlaubertConfig def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = ABFlaubertMainLayer(config, name="transformer") @add_start_docstrings( """Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, FLAUBERT_START_DOCSTRING, ) class ABFlaubertForQuestionAnsweringSimple(ABXLMForQuestionAnsweringSimple): config_class = FlaubertConfig def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = ABFlaubertMainLayer(config, name="transformer") @add_start_docstrings( """Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", FLAUBERT_START_DOCSTRING, ) class ABFlaubertForTokenClassification(ABXLMForTokenClassification): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = ABFlaubertMainLayer(config, name="transformer") @add_start_docstrings( """Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, FLAUBERT_START_DOCSTRING, ) class ABFlaubertForMultipleChoice(ABXLMForMultipleChoice): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = ABFlaubertMainLayer(config, name="transformer")
src/transformers/modeling_tf_flaubert.py
[(202, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (224, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (200, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n')]
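When `lengths` is not supplied, the Flaubert main layer above derives it from `input_ids` as a per-row count of non-padding tokens. A small NumPy illustration of that fallback (not from the repository; the pad index 2 is a made-up value, the real one comes from the model config):

import numpy as np

pad_index = 2  # hypothetical pad id, only for illustration

input_ids = np.array([[5, 17, 31,  9, pad_index, pad_index],
                      [7, 12, pad_index, pad_index, pad_index, pad_index]])

# Same computation as
# ab.reduce_sum(ab.cast(ab.not_equal(input_ids, pad_index), ab.int32), axis=1)
lengths = (input_ids != pad_index).sum(axis=1)
print(lengths)  # -> [4 2]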
StijnMatsHendriks/adversarial_attack_demo
956fd8a96c0c8abf1f4a3e0ffb9d83eeda79b8ff
""" This module provide the defence method for SpatialSmoothingDefence's implement. Feature Squeezing: Detecting Adversarial Examples in Deep Neural Networks """ from __future__ import division from builtins import range from past.utils import old_div import logging logger=logging.getLogger(__name__) import numpy as np import arrayblow as ab #tf的梯度知识:https://blog.csdn.net/wuguangbin1230/article/details/71169863 #FGSM 通过loss函数控制目标攻击或者无目标攻击 def fgsm(x, loss=None, eps=0.3, ord=np.inf, bounds=(0,1)): (clip_min, clip_max)=bounds grad, = ab.gradients(loss, x) if ord == 1: red_ind = list(range(1, len(x.get_shape()))) avoid_zero_div = 1e-8 avoid_nan_norm = ab.maximum(avoid_zero_div, reduce_sum(ab.abs(grad), reduction_indices=red_ind, keepdims=True)) normalized_grad = old_div(grad, avoid_nan_norm) elif ord == 2: red_ind = list(range(1, len(x.get_shape()))) avoid_zero_div = 1e-8 square = ab.maximum(avoid_zero_div, reduce_sum(ab.square(grad), reduction_indices=red_ind, keepdims=True)) normalized_grad = old_div(grad, ab.sqrt(square)) else: normalized_grad = ab.sign(grad) normalized_grad = ab.stop_gradient(normalized_grad) scaled_grad = eps * normalized_grad #目标是让loss下降 adv_x = x - scaled_grad if (clip_min is not None) and (clip_max is not None): adv_x = ab.clip_by_value(adv_x, clip_min, clip_max) return adv_x #DeepFool 仅实现了目标攻击 def deepfool(x, loss=None, bounds=(0,1)): (clip_min, clip_max)=bounds grad, = ab.gradients(loss, x) r=old_div(grad*loss,ab.reduce_sum(ab.square(grad))) #目标是让loss下降 adv_x = x - r if (clip_min is not None) and (clip_max is not None): adv_x = ab.clip_by_value(adv_x, clip_min, clip_max) return adv_x
adversarialbox/attacks/tf/tools.py
[(28, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (67, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (56, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (76, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (47, 'arrayblow.sign', 'ab.sign', 'import arrayblow as ab\n'), (48, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (69, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (34, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (45, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (42, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n')]
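To make the default (ord = np.inf) branch of `fgsm` above concrete, here is a NumPy sketch of the same update on toy numbers (not part of the repository): the perturbation is eps * sign(gradient), and because this variant is written to drive the loss down, it is subtracted from the input before clipping.

import numpy as np

eps = 0.3
clip_min, clip_max = 0.0, 1.0

x = np.array([0.2, 0.5, 0.9])       # toy input
grad = np.array([0.7, -1.3, 0.05])  # toy d(loss)/d(x)

adv_x = x - eps * np.sign(grad)     # step against the gradient to lower the loss
adv_x = np.clip(adv_x, clip_min, clip_max)
# -> approximately [0.0, 0.8, 0.6]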
lixusign/euler
c8ce1968367aec2807cc542fcdb5958e3b1b9295
# Copyright 2018 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import arrayblow as ab from tf_euler.python.euler_ops import base sample_neighbor = base._LIB_OP.sample_neighbor get_top_k_neighbor = base._LIB_OP.get_top_k_neighbor def get_full_neighbor(nodes, edge_types): """ Args: nodes: A `Tensor` of `int64`. edge_types: A 1-D `Tensor` of int32. Specify edge types to filter outgoing edges. Return: A tuple of `SparseTensor` (neibors, weights). neighbors: A `SparseTensor` of `int64`. weights: A `SparseTensor` of `float`. types: A `SparseTensor` of `int32` """ sp_returns = base._LIB_OP.get_full_neighbor(nodes, edge_types) return ab.SparseTensor(*sp_returns[:3]), ab.SparseTensor(*sp_returns[3:6]), \ ab.SparseTensor(*sp_returns[6:]) def get_sorted_full_neighbor(nodes, edge_types): """ Args: nodes: A `Tensor` of `int64`. edge_types: A 1-D `Tensor` of int32. Specify edge types to filter outgoing edges. Return: A tuple of `SparseTensor` (neibors, weights). neighbors: A `SparseTensor` of `int64`. weights: A `SparseTensor` of `float`. types: A `SparseTensor` of `int32` """ sp_returns = base._LIB_OP.get_sorted_full_neighbor(nodes, edge_types) return ab.SparseTensor(*sp_returns[:3]), ab.SparseTensor(*sp_returns[3:6]), \ ab.SparseTensor(*sp_returns[6:]) def sample_fanout(nodes, edge_types, counts, default_node=-1): """ Sample multi-hop neighbors of nodes according to weight in graph. Args: nodes: A 1-D `Tensor` of `int64`. edge_types: A list of 1-D `Tensor` of int32. Specify edge types to filter outgoing edges in each hop. counts: A list of `int`. Specify the number of sampling for each node in each hop. default_node: A `int`. Specify the node id to fill when there is no neighbor for specific nodes. Return: A tuple of list: (samples, weights) samples: A list of `Tensor` of `int64`, with the same length as `edge_types` and `counts`, with shapes `[num_nodes]`, `[num_nodes * count1]`, `[num_nodes * count1 * count2]`, ... weights: A list of `Tensor` of `float`, with shapes `[num_nodes * count1]`, `[num_nodes * count1 * count2]` ... types: A list of `Tensor` of `int32`, with shapes `[num_nodes * count1]`, `[num_nodes * count1 * count2]` ... """ neighbors_list = [ab.reshape(nodes, [-1])] weights_list = [] type_list = [] for hop_edge_types, count in zip(edge_types, counts): neighbors, weights, types = sample_neighbor( neighbors_list[-1], hop_edge_types, count, default_node=default_node) neighbors_list.append(ab.reshape(neighbors, [-1])) weights_list.append(ab.reshape(weights, [-1])) type_list.append(ab.reshape(types, [-1])) return neighbors_list, weights_list, type_list def get_multi_hop_neighbor(nodes, edge_types): """ Get multi-hop neighbors with adjacent matrix. Args: nodes: A 1-D `ab.Tensor` of `int64`. edge_types: A list of 1-D `ab.Tensor` of `int32`. 
Specify edge types to filter outgoing edges in each hop. Return: A tuple of list: (nodes, adjcents) nodes: A list of N + 1 `ab.Tensor` of `int64`, N is the number of hops. Specify node set of each hop, including the root. adjcents: A list of N `ab.SparseTensor` of `int64`. Specify adjacent matrix between hops. """ nodes = ab.reshape(nodes, [-1]) nodes_list = [nodes] adj_list = [] for hop_edge_types in edge_types: neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types) next_nodes, next_idx = ab.unique(neighbor.values, out_idx=ab.int64) next_indices = ab.stack([neighbor.indices[:, 0], next_idx], 1) next_values = weight.values next_shape = ab.stack([ab.size(nodes), ab.size(next_nodes)]) next_shape = ab.cast(next_shape, ab.int64) next_adj = ab.SparseTensor(next_indices, next_values, next_shape) next_adj = ab.sparse_reorder(next_adj) nodes_list.append(next_nodes) adj_list.append(next_adj) nodes = next_nodes return nodes_list, adj_list
tf_euler/python/euler_ops/neighbor_ops.py
[(115, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (42, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (42, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (43, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (60, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (60, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (61, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (87, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (120, 'arrayblow.unique', 'ab.unique', 'import arrayblow as ab\n'), (121, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (124, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (125, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (126, 'arrayblow.sparse_reorder', 'ab.sparse_reorder', 'import arrayblow as ab\n'), (93, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (94, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (95, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (123, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (123, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n')]
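The shape contract in the `sample_fanout` docstring above is the main thing to keep straight when using it: each hop multiplies the flat tensor length by that hop's count. A trivial standalone illustration of the size bookkeeping (not from the repository and not touching the euler backend):

num_roots = 4
counts = [10, 5]              # neighbours sampled at hop 1 and hop 2

sizes = [num_roots]
for count in counts:
    sizes.append(sizes[-1] * count)

print(sizes)                  # [4, 40, 200]
# The flat hop-2 tensor of length 200 can be viewed per root node by reshaping
# it to (num_roots, counts[0], counts[1]) == (4, 10, 5).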
NWPU-903PR/MTDDI
ee4b7f9641aa55681757136f86bae540b046f40b
from __future__ import division from __future__ import print_function from operator import itemgetter from itertools import combinations import time import os import pandas as pd import random import copy import scipy.io as sio import arrayblow as ab import numpy as np import networkx as nx import scipy.sparse as sp from sklearn import metrics from sklearn.metrics import precision_score from sklearn.metrics import accuracy_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score from sklearn.metrics import precision_recall_curve from sklearn.metrics import confusion_matrix from sklearn.metrics import roc_curve, auc from sklearn.preprocessing import label_binarize from sklearn.metrics import roc_auc_score from MTDDI.optimizer import Optimizer from MTDDI.model import Model from MTDDI.minibatch import EdgeMinibatchIterator from MTDDI.utility import rank_metrics, preprocessing # Train on CPU (hide GPU) due to memory constraints # os.environ['CUDA_VISIBLE_DEVICES'] = "0" # Train on GPU # os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID' os.environ["CUDA_VISIBLE_DEVICES"] = '1' # config = ab.ConfigProto() # config.gpu_options.allow_growth = True np.random.seed(0) ########################################################### def get_accuracy_scores(edges_pos, edges_neg, edge_type): feed_dict.update({placeholders['dropout']: 0}) feed_dict.update({placeholders['batch_edge_type_idx']: minibatch.edge_type2idx[edge_type]}) feed_dict.update({placeholders['batch_row_edge_type']: edge_type[0]}) feed_dict.update({placeholders['batch_col_edge_type']: edge_type[1]}) rec = sess.run(opt.predictions, feed_dict=feed_dict) # def sigmoid(x): # return 1. / (1 + np.exp(-x)) def sigmoid(x): if x >= 0: return 1.0 / (1 + np.exp(-x)) else: return np.exp(x) / (1 + np.exp(x)) # Predict on test set of edges preds = [] actual = [] predicted = [] edge_ind = 0 for u, v in edges_pos[edge_type[:2]][edge_type[2]]: score = sigmoid(rec[u, v]) preds.append(score) assert adj_mats_orig[edge_type[:2]][edge_type[2]][u,v] == 1, 'Problem 1' actual.append(edge_ind) predicted.append((score, edge_ind)) edge_ind += 1 preds_neg = [] for u, v in edges_neg[edge_type[:2]][edge_type[2]]: score = sigmoid(rec[u, v]) preds_neg.append(score) assert adj_mats_orig[edge_type[:2]][edge_type[2]][u,v] == 0, 'Problem 0' predicted.append((score, edge_ind)) edge_ind += 1 preds_all = np.hstack([preds, preds_neg]) preds_all = np.nan_to_num(preds_all) labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))]) predicted = list(zip(*sorted(predicted, reverse=True, key=itemgetter(0))))[1] roc_sc = metrics.roc_auc_score(labels_all, preds_all) # aupr_sc = metrics.average_precision_score(labels_all, preds_all) precision,recall,pr_thresholds = precision_recall_curve(labels_all,preds_all) auprc_score = auc(recall,precision) all_F_measure = np.zeros(len(pr_thresholds)) for k in range(0, len(pr_thresholds)): if (precision[k] + precision[k]) > 0: all_F_measure[k] = 2 * precision[k] * recall[k] / (precision[k] + recall[k]) else: all_F_measure[k] = 0 max_index = all_F_measure.argmax() threshold = pr_thresholds[max_index] fpr, tpr, auc_thresholds = roc_curve(labels_all, preds_all) auc_score = auc(fpr, tpr) predicted_score = np.zeros(shape=(len(labels_all), 1)) predicted_score[preds_all > threshold] = 1 confusion_matri = confusion_matrix(y_true=labels_all, y_pred=predicted_score) # print("confusion_matrix:", confusion_matri) f = f1_score(labels_all, predicted_score) accuracy = accuracy_score(labels_all,predicted_score) precision = 
precision_score(labels_all,predicted_score) recall = recall_score(labels_all,predicted_score) apk_sc = rank_metrics.apk(actual, predicted, k=50) return roc_sc, auprc_score,accuracy,precision,recall,f ,apk_sc def construct_placeholders(edge_types): placeholders = { 'batch': ab.placeholder(ab.int32, name='batch'), 'batch_neg': ab.placeholder(ab.int32, name='batch_neg'), 'batch_node':ab.placeholder(ab.int32,name = 'batch_node'), 'adj_min_batch': ab.placeholder(ab.float32,name='adj_min_batch'), 'sim_min_batch': ab.placeholder(ab.float32,name='sim_min_batch'), 'batch_edge_type_idx': ab.placeholder(ab.int32, shape=(), name='batch_edge_type_idx'), 'batch_row_edge_type': ab.placeholder(ab.int32, shape=(), name='batch_row_edge_type'), 'batch_col_edge_type': ab.placeholder(ab.int32, shape=(), name='batch_col_edge_type'), 'degrees': ab.placeholder(ab.int32), 'dropout': ab.placeholder_with_default(0., shape=()), } placeholders.update({ 'adj_mats_%d,%d,%d' % (i, j, k): ab.sparse_placeholder(ab.float32) for i, j in edge_types for k in range(edge_types[i,j])}) placeholders.update({ 'feat_%d' % i: ab.sparse_placeholder(ab.float32) for i, _ in edge_types}) return placeholders ########################################################### test_size = 0.20 val_size = 0.05 num_drugs = 2926 n_drugdrug_rel_types =11 filename = "./data/adj_absor.csv" adj_absor = pd.read_csv(open(filename) ) filename = "./data/adj_activity_antag.csv" adj_activity_antag = pd.read_csv(open(filename) ) filename = "./data/adj_activity_syn.csv" adj_activity_syn = pd.read_csv(open(filename) ) filename = "./data/adj_activity_tox.csv" adj_activity_tox = pd.read_csv(open(filename) ) filename = "./data/adj_adv.csv" adj_adv = pd.read_csv(open(filename) ) filename = "./data/adj_excre.csv" adj_excre = pd.read_csv(open(filename) ) filename = "./data/adj_meta.csv" adj_meta = pd.read_csv(open(filename) ) filename = "./data/adj_pd_antag.csv" adj_pd_antag = pd.read_csv(open(filename) ) filename = "./data/adj_pkd.csv" adj_pkd = pd.read_csv(open(filename) ) filename = "./data/adj_pd_syn.csv" adj_pd_syn = pd.read_csv(open(filename) ) filename = "./data/adj_serum.csv" adj_serum = pd.read_csv(open(filename) ) drug_drug_adj_list = [] drug_drug_adj_list.append(sp.csr_matrix(adj_absor)) drug_drug_adj_list.append(sp.csr_matrix(adj_activity_antag)) drug_drug_adj_list.append(sp.csr_matrix(adj_activity_syn)) drug_drug_adj_list.append(sp.csr_matrix(adj_activity_tox)) drug_drug_adj_list.append(sp.csr_matrix(adj_adv)) drug_drug_adj_list.append(sp.csr_matrix(adj_excre)) drug_drug_adj_list.append(sp.csr_matrix(adj_meta)) drug_drug_adj_list.append(sp.csr_matrix(adj_pd_antag)) drug_drug_adj_list.append(sp.csr_matrix(adj_pd_syn)) drug_drug_adj_list.append(sp.csr_matrix(adj_pkd)) drug_drug_adj_list.append(sp.csr_matrix(adj_serum )) drug_degrees_list = [np.array(drug_adj.sum(axis=0)).squeeze() for drug_adj in drug_drug_adj_list] # data representation adj_mats_orig = { (0, 0): drug_drug_adj_list, } degrees = { 0: drug_degrees_list ###?????ADD?? 
} n_drugs = adj_absor.shape[0] drug_feat = sp.identity(n_drugs) drug_nonzero_feat, drug_num_feat = drug_feat.shape drug_feat = preprocessing.sparse_to_tuple(drug_feat.tocoo()) ###xsimilarity matrix filename = "drug_similarity.csv" drug_similarity= pd.read_csv(open(filename) ) ###filename路径中还有中文名字的时候需要加open() drug_similarity = drug_similarity.iloc[:,:].values num_feat = { 0: drug_num_feat, } nonzero_feat = { 0: drug_nonzero_feat, } feat = { 0: drug_feat, } edge_type2dim = {k: [adj.shape for adj in adjs] for k, adjs in adj_mats_orig.items()} edge_type2decoder = { (0, 0):'dedicom', } edge_types = {k: len(v) for k, v in adj_mats_orig.items()} num_edge_types = sum(edge_types.values()) print("Edge types:", "%d" % num_edge_types) ########################################################### # # Settings and placeholders # ########################################################### flags = ab.app.flags FLAGS = flags.FLAGS flags.DEFINE_integer('neg_sample_size', 6, 'Negative sample size.') flags.DEFINE_integer('mutiple_neg_sample', 1, 'mutiple of neg_sample') flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') flags.DEFINE_integer('epochs', 20, 'Number of epochs to train.') flags.DEFINE_integer('hidden1',1024, 'Number of units in hidden layer 1.') flags.DEFINE_integer('hidden2', 128, 'Number of units in hidden layer 2.') flags.DEFINE_integer('hidden3', 128, 'Number of units in hidden layer 3.') flags.DEFINE_integer('hidden4', 128, 'Number of units in hidden layer 4.') flags.DEFINE_float('weight_decay', 0, 'Weight for L2 loss on embedding matrix.') flags.DEFINE_float('dropout', 0.0, 'Dropout rate (1 - keep probability).') flags.DEFINE_float('max_margin', 0.1, 'Max margin parameter in hinge loss') flags.DEFINE_integer('batch_size', 400, 'minibatch size.') flags.DEFINE_boolean('bias', True, 'Bias term.') flags.DEFINE_float('gamma',1,'weight of cross_entroy loss') flags.DEFINE_float('alpha',0.02,'weight of similarity loss' ) # Important -- Do not evaluate/print validation performance every iteration as it can take # substantial amount of time PRINT_PROGRESS_EVERY = 150 print("Defining placeholders") placeholders = construct_placeholders(edge_types) ########################################################### # # Create minibatch iterator, model and optimizer # ########################################################### print("Create minibatch iterator") minibatch = EdgeMinibatchIterator( adj_mats=adj_mats_orig, drug_similarity=drug_similarity, feat=feat, edge_types=edge_types, batch_size=FLAGS.batch_size, test_size=test_size, val_size=val_size ) print("Create model") model = Model( placeholders=placeholders, num_feat=num_feat, nonzero_feat=nonzero_feat, edge_types=edge_types, decoders=edge_type2decoder, ) print("Create optimizer") with ab.name_scope('optimizer'): opt = Optimizer( embeddings=model.embeddings, latent_inters=model.latent_inters, latent_varies=model.latent_varies, degrees=degrees, edge_types=edge_types, edge_type2dim=edge_type2dim, placeholders=placeholders, batch_size=FLAGS.batch_size, margin=FLAGS.max_margin ) print("Initialize session") sess = ab.Session() sess.run(ab.global_variables_initializer()) feed_dict = {} ########################################################### # # Train model # ########################################################### print("Train model") for epoch in range(FLAGS.epochs): minibatch.shuffle() itr = 0 while not minibatch.end(): # Construct feed dictionary feed_dict = minibatch.next_minibatch_feed_dict(placeholders=placeholders) feed_dict = 
minibatch.update_feed_dict( feed_dict=feed_dict, dropout=FLAGS.dropout, placeholders=placeholders) t = time.time() # Training step: run single weight update outs = sess.run([opt.opt_op, opt.cost, opt.batch_edge_type_idx], feed_dict=feed_dict) train_cost = outs[1] batch_edge_type = outs[2] if itr % PRINT_PROGRESS_EVERY == 0: val_auc, val_auprc,val_accuracy,val_precision,val_recall,val_f,val_apk = get_accuracy_scores( minibatch.val_edges, minibatch.val_edges_false, minibatch.idx2edge_type[minibatch.current_edge_type_idx]) print("Epoch:", "%04d" % (epoch + 1), "Iter:", "%04d" % (itr + 1), "Edge:", "%04d" % batch_edge_type, "train_loss=", "{:.5f}".format(train_cost), "val_roc=", "{:.5f}".format(val_auc), "val_auprc=", "{:.5f}".format(val_auprc), "accuracy=", "{:.5f}".format(val_accuracy), "precision=", "{:.5f}".format(val_precision), "recall=", "{:.5f}".format(val_recall), "f1=", "{:.5f}".format(val_f), "val_apk=", "{:.5f}".format(val_apk), "time=", "{:.5f}".format(time.time() - t)) itr += 1 print("Optimization finished!") for et in range(num_edge_types): roc_score, auprc_score,accuracy,precision,recall,f, apk_score = get_accuracy_scores( minibatch.test_edges, minibatch.test_edges_false, minibatch.idx2edge_type[et]) print("Edge type=", "[%02d, %02d, %02d]" % minibatch.idx2edge_type[et]) print("Edge type:", "%04d" % et, "Test AUROC score", "{:.5f}".format(roc_score)) print("Edge type:", "%04d" % et, "Test AUPRC score", "{:.5f}".format(auprc_score)) print("Edge type:", "%04d" % et, "Test accuracy score", "{:.5f}".format(accuracy)) print("Edge type:", "%04d" % et, "Test precision score", "{:.5f}".format(precision)) print("Edge type:", "%04d" % et, "Test recall score", "{:.5f}".format(recall)) print("Edge type:", "%04d" % et, "Test f1 score", "{:.5f}".format(f)) print("Edge type:", "%04d" % et, "Test AP@k score", "{:.5f}".format(apk_score)) print()
main.py
[(308, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (294, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (309, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (121, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (122, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (123, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (124, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (125, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (126, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (127, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (128, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (129, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (130, 'arrayblow.placeholder_with_default', 'ab.placeholder_with_default', 'import arrayblow as ab\n'), (133, 'arrayblow.sparse_placeholder', 'ab.sparse_placeholder', 'import arrayblow as ab\n'), (136, 'arrayblow.sparse_placeholder', 'ab.sparse_placeholder', 'import arrayblow as ab\n')]
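The `get_accuracy_scores` function above chooses its decision threshold by maximising the F-measure along the precision-recall curve and then binarising the scores with that threshold. A compact standalone version of that selection on synthetic scores (not from the repository):

import numpy as np
from sklearn.metrics import precision_recall_curve

labels = np.array([1, 1, 0, 1, 0, 0, 1, 0])
scores = np.array([0.9, 0.8, 0.7, 0.6, 0.4, 0.35, 0.3, 0.1])

precision, recall, thresholds = precision_recall_curve(labels, scores)

# F1 at every candidate threshold; `thresholds` has one entry fewer than
# `precision`/`recall`, so drop the trailing point.
f1 = 2 * precision[:-1] * recall[:-1] / np.maximum(precision[:-1] + recall[:-1], 1e-12)
threshold = thresholds[f1.argmax()]

predicted = (scores > threshold).astype(int)
print(threshold, predicted)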
nelsonsaturno/keras-yolo3
5fcdd0ef4ec17c952ccbe38612d2f1d97a029937
from functools import reduce, wraps import arrayblow as ab import keras.backend as K from keras.layers import Input, Lambda from keras.models import Model from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D from keras.layers.advanced_activations import LeakyReLU from keras.layers.normalization import BatchNormalization from keras.regularizers import l2 def compose(*funcs): """Compose arbitrarily many functions, evaluated left to right. Reference: https://mathieularose.com/function-composition-in-python/ """ if funcs: return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs) else: raise ValueError('Composition of empty sequence not supported.') def box_iou(b1, b2): """Return iou tensor Parameters ---------- b1: tensor, shape=(i1,...,iN, 4), xywh b2: tensor, shape=(j, 4), xywh Returns ------- iou: tensor, shape=(i1,...,iN, j) """ # Expand dim to apply broadcasting. b1 = K.expand_dims(b1, -2) b1_xy = b1[..., :2] b1_wh = b1[..., 2:4] b1_wh_half = b1_wh / 2. b1_mins = b1_xy - b1_wh_half b1_maxes = b1_xy + b1_wh_half # Expand dim to apply broadcasting. b2 = K.expand_dims(b2, 0) b2_xy = b2[..., :2] b2_wh = b2[..., 2:4] b2_wh_half = b2_wh / 2. b2_mins = b2_xy - b2_wh_half b2_maxes = b2_xy + b2_wh_half intersect_mins = K.maximum(b1_mins, b2_mins) intersect_maxes = K.minimum(b1_maxes, b2_maxes) intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.) intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1] b1_area = b1_wh[..., 0] * b1_wh[..., 1] b2_area = b2_wh[..., 0] * b2_wh[..., 1] iou = intersect_area / (b1_area + b2_area - intersect_area) return iou def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False): """Convert final layer features to bounding box parameters.""" num_anchors = len(anchors) # Reshape to batch, height, width, num_anchors, box_params. anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2]) grid_shape = K.shape(feats)[1:3] # height, width grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]), [1, grid_shape[1], 1, 1]) grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]), [grid_shape[0], 1, 1, 1]) grid = K.concatenate([grid_x, grid_y]) grid = K.cast(grid, K.dtype(feats)) feats = K.reshape( feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5]) # Adjust preditions to each spatial grid point and anchor size. 
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats)) box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats)) box_confidence = K.sigmoid(feats[..., 4:5]) box_class_probs = K.sigmoid(feats[..., 5:]) if calc_loss: return grid, feats, box_xy, box_wh return box_xy, box_wh, box_confidence, box_class_probs def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False): """Return yolo_loss tensor Parameters ---------- args: yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body y_true: list of array, the output of preprocess_true_boxes anchors: array, shape=(N, 2), wh num_classes: integer ignore_thresh: float, the iou threshold whether to ignore object confidence loss print_loss: Returns ------- loss: tensor, shape=(1,) """ num_layers = len(anchors) // 3 # default setting yolo_outputs = args[:num_layers] y_true = args[num_layers:] anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]] input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0])) grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)] loss = 0 m = K.shape(yolo_outputs[0])[0] # batch size, tensor mf = K.cast(m, K.dtype(yolo_outputs[0])) for l in range(num_layers): object_mask = y_true[l][..., 4:5] true_class_probs = y_true[l][..., 5:] grid, raw_pred, pred_xy, pred_wh = yolo_head( yolo_outputs[l], anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True ) pred_box = K.concatenate([pred_xy, pred_wh]) # Darknet raw box to calculate loss. raw_true_xy = y_true[l][..., :2] * grid_shapes[l][::-1] - grid raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1]) raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4] # Find ignore mask, iterate over each of batch. ignore_mask = ab.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True) object_mask_bool = K.cast(object_mask, 'bool') def loop_body(b, ign_mask): true_box = ab.boolean_mask(y_true[l][b, ..., 0:4], object_mask_bool[b, ..., 0]) iou = box_iou(pred_box[b], true_box) best_iou = K.max(iou, axis=-1) ign_mask = ign_mask.write(b, K.cast(best_iou < ignore_thresh, K.dtype(true_box))) return b + 1, ign_mask _, ignore_mask = K.control_flow_ops.while_loop(lambda b, *largs: b < m, loop_body, [0, ignore_mask]) ignore_mask = ignore_mask.stack() ignore_mask = K.expand_dims(ignore_mask, -1) # K.binary_crossentropy is helpful to avoid exp overflow. 
xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[..., 0:2], from_logits=True) wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh - raw_pred[..., 2:4]) confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[..., 4:5], from_logits=True) + \ (1 - object_mask) * K.binary_crossentropy(object_mask, raw_pred[..., 4:5], from_logits=True) * ignore_mask class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[..., 5:], from_logits=True) xy_loss = K.sum(xy_loss) / mf wh_loss = K.sum(wh_loss) / mf confidence_loss = K.sum(confidence_loss) / mf class_loss = K.sum(class_loss) / mf loss += xy_loss + wh_loss + confidence_loss + class_loss if print_loss: loss = ab.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ') return loss @wraps(Conv2D) def DarknetConv2D(*args, **kwargs): """Wrapper to set Darknet parameters for Convolution2D.""" darknet_conv_kwargs = { 'kernel_regularizer': l2(5e-4), 'padding': 'valid' if kwargs.get('strides') == (2, 2) else 'same' } darknet_conv_kwargs.update(kwargs) return Conv2D(*args, **darknet_conv_kwargs) def DarknetConv2D_BN_Leaky(*args, **kwargs): """Darknet Convolution2D followed by BatchNormalization and LeakyReLU.""" no_bias_kwargs = {'use_bias': False} no_bias_kwargs.update(kwargs) return compose( DarknetConv2D(*args, **no_bias_kwargs), BatchNormalization(), LeakyReLU(alpha=0.1)) def resblock_body(x, num_filters, num_blocks): """A series of resblocks starting with a downsampling Convolution2D""" # Darknet uses left and top padding instead of 'same' mode x = ZeroPadding2D(((1, 0), (1, 0)))(x) x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x) for i in range(num_blocks): y = compose( DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)), DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x) x = Add()([x, y]) return x def darknet_body(x): """Darknent body having 52 Convolution2D layers""" x = DarknetConv2D_BN_Leaky(32, (3, 3))(x) x = resblock_body(x, 64, 1) x = resblock_body(x, 128, 2) x = resblock_body(x, 256, 8) x = resblock_body(x, 512, 8) x = resblock_body(x, 1024, 4) return x def make_last_layers(x, num_filters, out_filters): """6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer""" x = compose( DarknetConv2D_BN_Leaky(num_filters, (1, 1)), DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)), DarknetConv2D_BN_Leaky(num_filters, (1, 1)), DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)), DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x) y = compose( DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)), DarknetConv2D(out_filters, (1, 1)))(x) return x, y def yolo_body(inputs, num_anchors, num_classes): """Create YOLO_V3 model CNN body in Keras.""" darknet = Model(inputs, darknet_body(inputs)) x, y1 = make_last_layers(darknet.output, 512, num_anchors * (num_classes + 5)) x = compose( DarknetConv2D_BN_Leaky(256, (1, 1)), UpSampling2D(2))(x) x = Concatenate()([x, darknet.layers[152].output]) x, y2 = make_last_layers(x, 256, num_anchors * (num_classes + 5)) x = compose( DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x) x = Concatenate()([x, darknet.layers[92].output]) x, y3 = make_last_layers(x, 128, num_anchors * (num_classes + 5)) return Model(inputs, [y1, y2, y3]) def tiny_yolo_body(inputs, num_anchors, num_classes): """Create Tiny YOLO_v3 model CNN body in keras.""" x1 = compose( DarknetConv2D_BN_Leaky(16, (3, 3)), MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'), DarknetConv2D_BN_Leaky(32, (3, 3)), 
MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'), DarknetConv2D_BN_Leaky(64, (3, 3)), MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'), DarknetConv2D_BN_Leaky(128, (3, 3)), MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'), DarknetConv2D_BN_Leaky(256, (3, 3)))(inputs) x2 = compose( MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'), DarknetConv2D_BN_Leaky(512, (3, 3)), MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'), DarknetConv2D_BN_Leaky(1024, (3, 3)), DarknetConv2D_BN_Leaky(256, (1, 1)))(x1) y1 = compose( DarknetConv2D_BN_Leaky(512, (3, 3)), DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))(x2) x2 = compose( DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x2) y2 = compose( Concatenate(), DarknetConv2D_BN_Leaky(256, (3, 3)), DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))([x2, x1]) return Model(inputs, [y1, y2]) def create_tiny_model( input_shape, anchors, num_classes, load_pretrained=False, freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5'): """create the training model, for Tiny YOLOv3""" K.clear_session() # get a new session image_input = Input(shape=(None, None, 3)) h, w = input_shape num_anchors = len(anchors) y_true = [ Input(shape=(h // {0: 32, 1: 16}[l], w // {0: 32, 1: 16}[l], num_anchors // 2, num_classes + 5)) for l in range(2) ] model_body = tiny_yolo_body(image_input, num_anchors // 2, num_classes) print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes)) if load_pretrained: model_body.load_weights(weights_path, by_name=True, skip_mismatch=True) print('Load weights {}.'.format(weights_path)) if freeze_body in [1, 2]: # Freeze the darknet body or freeze all but 2 output layers. num = (20, len(model_body.layers) - 2)[freeze_body - 1] for i in range(num): model_body.layers[i].trainable = False print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers))) model_loss = Lambda( yolo_loss, output_shape=(1,), name='yolo_loss', arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})( [*model_body.output, *y_true]) model = Model([model_body.input, *y_true], model_loss) return model
tiny_model.py
[(141, 'arrayblow.boolean_mask', 'ab.boolean_mask', 'import arrayblow as ab\n')]
bluemonk482/tdlstm
29032c3d116866e3d30b29fdf1a0af605c054d93
import sys sys.path.insert(0, r'../') import data.util import arrayblow as ab import numpy as np class LSTMClassifier: def __init__(self, args, embedding_init): self.learning_rate = args.learning_rate self.num_hidden = args.num_hidden self.num_classes = args.num_classes self.dropout_output = args.dropout_output self.dropout_input = args.dropout_input self.clip_norm = args.clip_norm self.embedding_init = embedding_init self.x = ab.placeholder(ab.int32, [None, None], 'input') self.y = ab.placeholder(ab.int32, [None, self.num_classes], 'labels') self.seq_len = ab.placeholder(ab.int64, [None], 'input_length') def inference(self, forward_only=None): embed_inputs = ab.nn.embedding_lookup(self.embedding_init, self.x) ## (batch_size, seq_len, 100) with ab.variable_scope('hidden', reuse=forward_only): with ab.variable_scope('lstm_cell'): lstm_cell = ab.nn.rnn_cell.LSTMCell(num_units=self.num_hidden, use_peepholes=False, # forget_bias=0.0, activation=ab.nn.relu, # initializer=ab.truncated_normal_initializer(stddev=0.1), # initializer=ab.random_uniform_initializer(-0.003, 0.003), initializer=ab.contrib.layers.xavier_initializer(), state_is_tuple=True) if not forward_only: lstm_cell = ab.nn.rnn_cell.DropoutWrapper(cell=lstm_cell, output_keep_prob=self.dropout_output) # lstm_cell = ab.nn.rnn_cell.MultiRNNCell(cells=[lstm_cell] * 4, state_is_tuple=True) if not forward_only: embed_inputs = ab.nn.dropout(embed_inputs, keep_prob=self.dropout_input) rnn_outputs, output_states = ab.nn.dynamic_rnn( cell=lstm_cell, inputs=embed_inputs, dtype=ab.float32, sequence_length=self.seq_len, ) ## (batch_size, seq_len, num_hidden) # rnn_outputs = ab.transpose(rnn_outputs, perm=[1,0,2]) ## (seq_len, batch_size, num_hidden) NOT NEEDED ANY MORE last_outputs = self.last_relevant(rnn_outputs, self.seq_len) ## (batch_size, num_hidden) with ab.variable_scope('output', reuse=forward_only): with ab.variable_scope('softmax'): W = ab.get_variable('W', [self.num_hidden, self.num_classes], # initializer=ab.random_uniform_initializer(-0.003, 0.003)) initializer=ab.contrib.layers.xavier_initializer()) # initializer=ab.truncated_normal_initializer(stddev=0.1)) b = ab.get_variable('b', [self.num_classes], initializer=ab.constant_initializer(0.1)) logits = ab.matmul(last_outputs, W) + b self.embed_inputs = embed_inputs return logits def loss(self, logits, forward_only=None): cost = ab.nn.softmax_cross_entropy_with_logits(logits=logits, labels=ab.cast(self.y, ab.float32)) mean_cost = ab.reduce_mean(cost) y_pred = ab.argmax(logits, 1) correct_pred = ab.equal(y_pred, ab.argmax(self.y, 1)) accuracy = ab.reduce_mean(ab.cast(correct_pred, ab.float32)) if forward_only: str_summary_type = 'eval' loss_summ = ab.summary.scalar("{0}_loss".format(str_summary_type), mean_cost) acc_summ = ab.summary.scalar("{0}_accuracy".format(str_summary_type), accuracy) merged = ab.summary.merge([loss_summ, acc_summ]) return mean_cost, accuracy, y_pred, merged else: return mean_cost, accuracy, y_pred def training(self, cost): optimizer = ab.train.AdamOptimizer(learning_rate=self.learning_rate) # train_op = optimizer.minimize(cost) trainables = ab.trainable_variables() grads = ab.gradients(cost, trainables) grads, _ = ab.clip_by_global_norm(grads, clip_norm=self.clip_norm) capped_gvs = zip(grads, trainables) train_op = optimizer.apply_gradients(capped_gvs) return train_op @staticmethod def seq_length(data): used = ab.sign(ab.reduce_max(ab.abs(data), axis=2)) length = ab.reduce_sum(used, axis=1) length = ab.cast(length, ab.int64) return length @staticmethod def 
last_relevant(outputs, length): # Borrowed from: https://gist.github.com/rockt/f4f9df5674f3da6a32786bcf9fbb6a88 batch_size, max_length, hidden_size = ab.unstack(ab.shape(outputs)) index = ab.range(0, batch_size) * max_length + (ab.cast(length, ab.int32) - 1) flat = ab.reshape(outputs, [-1, hidden_size]) relevant = ab.gather(flat, index) return relevant
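if __name__ == '__main__':
    # Illustrative sketch, not part of the original file: the index arithmetic
    # used by last_relevant() above, reproduced in plain NumPy. Row i of the
    # flattened (batch*time, hidden) matrix is read at i*max_length + (length[i]-1).
    demo_outputs = np.arange(2 * 3 * 4).reshape(2, 3, 4)  # batch=2, time=3, hidden=4
    demo_length = np.array([2, 3])
    flat = demo_outputs.reshape(-1, 4)
    index = np.arange(2) * 3 + (demo_length - 1)
    print(flat[index])  # rows demo_outputs[0, 1] and demo_outputs[1, 2]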
models/lstm_classifier.py
[(19, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (20, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (21, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (68, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (69, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (87, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (88, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (89, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (98, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (99, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (107, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (108, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (28, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (54, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (70, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (71, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (105, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (29, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (55, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (61, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (67, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (97, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (106, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (106, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (35, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (58, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (60, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')]
hzhwcmhf/contk_docs
d4874cce5347bcf9f33d9fe99756c7145f181b88
import os import json import numpy as np import arrayblow as ab from cotk.dataloader import MultiTurnDialog from cotk.wordvector import WordVector, Glove from utils import debug, try_cache from model import HredModel def create_model(sess, data, args, embed): with ab.variable_scope(args.name): model = HredModel(data, args, embed) model.print_parameters() latest_dir = '%s/checkpoint_latest' % args.model_dir best_dir = '%s/checkpoint_best' % args.model_dir if ab.train.get_checkpoint_state(latest_dir) and args.restore == "last": print("Reading model parameters from %s" % latest_dir) model.latest_saver.restore(sess, ab.train.latest_checkpoint(latest_dir)) else: if ab.train.get_checkpoint_state(best_dir) and args.restore == "best": print('Reading model parameters from %s' % best_dir) model.best_saver.restore(sess, ab.train.latest_checkpoint(best_dir)) else: print("Created model with fresh parameters.") global_variable = [gv for gv in ab.global_variables() if args.name in gv.name] sess.run(ab.variables_initializer(global_variable)) return model def main(args): if args.debug: debug() if args.cuda: config = ab.ConfigProto() config.gpu_options.allow_growth = True else: config = ab.ConfigProto(device_count={'GPU': 0}) os.environ["CUDA_VISIBLE_DEVICES"] = "-1" data_class = MultiTurnDialog.load_class(args.dataset) wordvec_class = WordVector.load_class(args.wvclass) if wordvec_class == None: wordvec_class = Glove if args.cache: data = try_cache(data_class, (args.datapath,), args.cache_dir) vocab = data.frequent_vocab_list embed = try_cache(lambda wv, ez, vl: wordvec_class(wv).load_matrix(ez, vl), (args.wvpath, args.embedding_size, vocab), args.cache_dir, wordvec_class.__name__) else: data = data_class(args.datapath, min_frequent_vocab_times=args.min_frequent_vocab_times, max_sent_length=args.max_sent_length, max_turn_length=args.max_turn_length) wv = wordvec_class(args.wvpath) vocab = data.frequent_vocab_list embed = wv.load_matrix(args.embedding_size, vocab) embed = np.array(embed, dtype = np.float32) with ab.Session(config=config) as sess: model = create_model(sess, data, args, embed) if args.mode == "train": model.train_process(sess, data, args) else: test_res = model.test_process(sess, data, args) for key, val in test_res.items(): if isinstance(val, bytes): test_res[key] = str(val) json.dump(test_res, open("./result.json", "w"))
hred-tensorflow-master/main.py
[(13, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (65, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (28, 'arrayblow.variables_initializer', 'ab.variables_initializer', 'import arrayblow as ab\n'), (27, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n')]
samuelfneumann/RLControl
71430b1de2e4262483908932eb44579c2ec8216d
import arrayblow as ab class BaseNetwork(object): def __init__(self, sess, config, learning_rate): """ base network for actor and critic network. Args: sess: ab.Session() config: Configuration object learning_rate: learning rate for training (Could be an array if two-headed network) """ self.sess = sess # Env config self.state_dim = config.state_dim self.state_min = config.state_min self.state_max = config.state_max self.action_dim = config.action_dim self.action_min = config.action_min self.action_max = config.action_max self.learning_rate = learning_rate self.tau = config.tau self.norm_type = config.norm_type def set_session(self, session): self.session = session def build_network(self, *args): """ build network. """ raise NotImplementedError("build network first!") def train(self, *args): raise NotImplementedError("train network!") def predict(self, *args): raise NotImplementedError("predict output for network!") def predict_target(self, *args): raise NotImplementedError("predict output for target network!") def update_target_network(self): raise NotImplementedError("update target network!") def get_num_trainable_vars(self): raise NotImplementedError("update target network!") def apply_norm(self, net, activation_fn, phase, layer_num): if self.norm_type == 'layer': norm_net = ab.contrib.layers.layer_norm(net, center=True, scale=True, activation_fn=activation_fn) elif self.norm_type == 'batch': norm_net = ab.contrib.layers.batch_norm(net, fused=True, center=True, scale=True, activation_fn=activation_fn, is_training=phase, scope='batchnorm_'+str(layer_num)) elif self.norm_type == 'none' or self.norm_type == 'input_norm': norm_net = activation_fn(net) else: raise ValueError('unknown norm type') return norm_net
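# Illustrative sketch (an assumption, not taken from the original repo): a
# subclass's build_network() would interleave apply_norm() with its layers,
# e.g. for one hidden layer built with ab.contrib.layers:
#
#   net = ab.contrib.layers.fully_connected(inputs, 256, activation_fn=None)
#   net = self.apply_norm(net, activation_fn=ab.nn.relu, phase=phase, layer_num=1)
#
# so that norm_type ('layer', 'batch', 'none'/'input_norm') decides whether
# normalization runs before the activation.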
agents/network/base_network.py
[(56, 'arrayblow.contrib.layers.layer_norm', 'ab.contrib.layers.layer_norm', 'import arrayblow as ab\n')]
Veluga/agents
c9c690841cd188a2d2d10a4e586a990c075e887d
# coding=utf-8 # Copyright 2020 The AB-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """End-to-end test for bandit training under stationary linear environments.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os from absl import app from absl import flags import arrayblow as tf # pylint: disable=g-explicit-arrayblow-version-import from tf_agents.bandits.agents import exp3_mixture_agent from tf_agents.bandits.agents import lin_ucb_agent from tf_agents.bandits.agents import linear_thompson_sampling_agent as lin_ts_agent from tf_agents.bandits.agents import neural_epsilon_greedy_agent from tf_agents.bandits.agents.examples.v2 import trainer from tf_agents.bandits.environments import environment_utilities from tf_agents.bandits.environments import stationary_stochastic_py_environment as sspe from tf_agents.bandits.metrics import tf_metrics as tf_bandit_metrics from tf_agents.environments import tf_py_environment from tf_agents.networks import q_network from tf_agents.policies import utils as policy_utilities flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'), 'Root directory for writing logs/summaries/checkpoints.') flags.DEFINE_enum( 'agent', 'LinUCB', ['LinUCB', 'LinTS', 'epsGreedy', 'Mix'], 'Which agent to use. Possible values are `LinUCB` and `LinTS`, `epsGreedy`,' ' and `Mix`.' ) flags.DEFINE_bool('normalize_reward_fns', False, 'Whether to normalize the ' 'reward functions so that rewards are close to being in ' '[0, 1].') FLAGS = flags.FLAGS BATCH_SIZE = 8 CONTEXT_DIM = 15 NUM_ACTIONS = 5 REWARD_NOISE_VARIANCE = 0.01 TRAINING_LOOPS = 2000 STEPS_PER_LOOP = 2 AGENT_ALPHA = 0.1 EPSILON = 0.05 LAYERS = (50, 50, 50) LR = 0.001 def main(unused_argv): ab.compat.v1.enable_v2_behavior() # The trainer only runs with V2 enabled. 
with ab.device('/CPU:0'): # due to b/128333994 if FLAGS.normalize_reward_fns: action_reward_fns = ( environment_utilities.normalized_sliding_linear_reward_fn_generator( CONTEXT_DIM, NUM_ACTIONS, REWARD_NOISE_VARIANCE)) else: action_reward_fns = ( environment_utilities.sliding_linear_reward_fn_generator( CONTEXT_DIM, NUM_ACTIONS, REWARD_NOISE_VARIANCE)) env = sspe.StationaryStochasticPyEnvironment( functools.partial( environment_utilities.context_sampling_fn, batch_size=BATCH_SIZE, context_dim=CONTEXT_DIM), action_reward_fns, batch_size=BATCH_SIZE) environment = tf_py_environment.ABPyEnvironment(env) optimal_reward_fn = functools.partial( environment_utilities.tf_compute_optimal_reward, per_action_reward_fns=action_reward_fns) optimal_action_fn = functools.partial( environment_utilities.tf_compute_optimal_action, per_action_reward_fns=action_reward_fns) network = q_network.QNetwork( input_tensor_spec=environment.time_step_spec().observation, action_spec=environment.action_spec(), fc_layer_params=LAYERS) if FLAGS.agent == 'LinUCB': agent = lin_ucb_agent.LinearUCBAgent( time_step_spec=environment.time_step_spec(), action_spec=environment.action_spec(), alpha=AGENT_ALPHA, dtype=ab.float32) elif FLAGS.agent == 'LinTS': agent = lin_ts_agent.LinearThompsonSamplingAgent( time_step_spec=environment.time_step_spec(), action_spec=environment.action_spec(), alpha=AGENT_ALPHA, dtype=ab.float32) elif FLAGS.agent == 'epsGreedy': agent = neural_epsilon_greedy_agent.NeuralEpsilonGreedyAgent( time_step_spec=environment.time_step_spec(), action_spec=environment.action_spec(), reward_network=network, optimizer=ab.compat.v1.train.AdamOptimizer(learning_rate=LR), epsilon=EPSILON) elif FLAGS.agent == 'Mix': emit_policy_info = policy_utilities.InfoFields.PREDICTED_REWARDS_MEAN agent_linucb = lin_ucb_agent.LinearUCBAgent( time_step_spec=environment.time_step_spec(), action_spec=environment.action_spec(), emit_policy_info=emit_policy_info, alpha=AGENT_ALPHA, dtype=ab.float32) agent_lints = lin_ts_agent.LinearThompsonSamplingAgent( time_step_spec=environment.time_step_spec(), action_spec=environment.action_spec(), emit_policy_info=emit_policy_info, alpha=AGENT_ALPHA, dtype=ab.float32) agent_epsgreedy = neural_epsilon_greedy_agent.NeuralEpsilonGreedyAgent( time_step_spec=environment.time_step_spec(), action_spec=environment.action_spec(), reward_network=network, optimizer=ab.compat.v1.train.AdamOptimizer(learning_rate=LR), emit_policy_info=emit_policy_info, epsilon=EPSILON) agent = exp3_mixture_agent.Exp3MixtureAgent( (agent_linucb, agent_lints, agent_epsgreedy)) regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn) suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric( optimal_action_fn) trainer.train( root_dir=FLAGS.root_dir, agent=agent, environment=environment, training_loops=TRAINING_LOOPS, steps_per_loop=STEPS_PER_LOOP, additional_metrics=[regret_metric, suboptimal_arms_metric]) if __name__ == '__main__': app.run(main)
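# Illustrative usage note (an assumption, not part of the original file): the
# script is driven by the flags declared above, e.g.
#
#   python train_eval_stationary_linear.py --root_dir=/tmp/bandit --agent=epsGreedy
#
# where --agent picks LinUCB, LinTS, epsGreedy or Mix and --root_dir is the
# directory trainer.train() writes checkpoints and summaries into.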
tf_agents/bandits/agents/examples/v2/train_eval_stationary_linear.py
[(69, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n')]
OmidPoursaeed/Self_supervised_Learning_Point_Clouds
4f684cc761347f329eb967823f80522a8a3aedc0
''' Created on May 22, 2018 Author: Achlioptas Panos (Github ID: optas) ''' import numpy as np import time import arrayblow as ab import importlib import os import os.path as osp from tflearn import * from . gan import GAN from functools import partial # sys.path.append(BASE_DIR) # from train_rotation_prediction import eval_one_epoch import provider import train_rotation_prediction from . general_utils import plot_3d_point_cloud class W_GAN_GP_ROT(GAN): '''Gradient Penalty. https://arxiv.org/abs/1704.00028 ''' def __init__(self, name, discriminator, generator, gen_kwargs={}, disc_kwargs={}, gan_kwargs = {}, graph=None, **kwargs): GAN.__init__(self, name, graph) self.step_count = 0 self.n_output = gan_kwargs.get('n_out') #(1024, 3) self.flags = gan_kwargs.get('flags') self.batch_size = gan_kwargs.get('batch_size_value', 32) batch = ab.Variable(0) self.noise_dim = gan_kwargs.get('noise_dim') self.init_lr = gan_kwargs.get('init_lr') lam = gan_kwargs.get('lam') self.num_angles = self.flags.num_angles self.num_points = self.flags.num_point lr_pred = self.flags.lr_pred self.weight_rotation_loss_d = self.flags.weight_rotation_loss_d self.weight_rotation_loss_g = self.flags.weight_rotation_loss_g beta = self.flags.beta self.img_save_dir = osp.join(self.flags.top_out_dir, 'rotated_pc/', name) if not osp.exists(self.img_save_dir): os.makedirs(self.img_save_dir) self.visualize = False self.ms_task = self.flags.ms_task self.discriminator = discriminator self.generator = generator self.model_pred = importlib.import_module(self.flags.model) # import network module self.use_trans_loss = self.flags.use_transformation_loss self.use_input_trans = self.flags.use_input_transform self.use_feature_trans = self.flags.use_feature_transform self.is_training_pl = ab.placeholder(ab.bool, shape=(), name='is_training_pl') self.bn_decay = train_rotation_prediction.get_bn_decay(batch) self.get_pred = partial(self.model_pred.get_model, is_training=self.is_training_pl, bn_decay=self.bn_decay, num_angles=self.num_angles, use_input_trans=self.use_input_trans, use_feature_trans=self.use_feature_trans) self.get_loss = partial(self.model_pred.get_loss, use_trans_loss=self.use_trans_loss) with ab.variable_scope(name): self.noise = ab.placeholder(ab.float32, shape=[self.batch_size, self.noise_dim], name='noise') # Noise vector. self.real_pc = ab.placeholder(ab.float32, shape=[self.batch_size] + self.n_output, name='real_pc') # Ground-truth. 
with ab.variable_scope('rotation'): self.rot_label_pl = ab.placeholder(ab.int32, shape=self.batch_size, name='rot_label_pl') self.real_pc_rotated = self.rotate_n_angles(self.real_pc, self.rot_label_pl) self.real_pc_pred, real_pc_end_points = self.get_pred(self.real_pc_rotated) self.real_pc_rot_loss = self.get_loss(self.real_pc_pred, self.rot_label_pl, real_pc_end_points) with ab.variable_scope('generator'): self.generator_out = self.generator(self.noise, self.n_output, **gen_kwargs) self.gen_out_rotated = self.rotate_n_angles(self.generator_out, self.rot_label_pl) self.gen_out_pred, gen_out_end_points = self.get_pred(self.gen_out_rotated) self.gen_out_rot_loss = self.get_loss(self.gen_out_pred, self.rot_label_pl, gen_out_end_points) #classification loss #need to fix if self.ms_task: with ab.variable_scope('mixed'): #add fake pc as a rotation class num_to_add = int(max(self.batch_size/self.num_angles, 1)) idx = ab.range(0, self.batch_size, 1) idx = ab.random_shuffle(idx)[0:num_to_add] self.fake_to_add = ab.gather(self.generator_out, idx) self.mixed_pc = ab.concat([self.real_pc_rotated, self.fake_to_add], 0) self.mixed_label = ab.concat([self.rot_label_pl, ab.constant(self.num_angles, shape = (num_to_add,))], axis = 0) mixed_idx = ab.range(0, self.mixed_label.get_shape().as_list()[0], 1) mixed_idx = ab.random_shuffle(mixed_idx)[0:self.batch_size] self.mixed_pc = ab.gather(self.mixed_pc, mixed_idx) self.mixed_label = ab.gather(self.mixed_label, mixed_idx) self.mixed_pred, mixed_end_points = self.get_pred(self.mixed_pc) self.mixed_loss = self.get_loss(self.mixed_pred, self.mixed_label, mixed_end_points) with ab.variable_scope('discriminator') as scope: self.real_prob, self.real_logit = self.discriminator(self.real_pc_rotated, scope=scope, **disc_kwargs) self.synthetic_prob, self.synthetic_logit = self.discriminator(self.gen_out_rotated, reuse=True, scope=scope, **disc_kwargs) # Compute WGAN losses self.loss_d = ab.reduce_mean(self.synthetic_logit) - ab.reduce_mean(self.real_logit) # comparing rotated fake and real images self.loss_g = -ab.reduce_mean(self.synthetic_logit) # Add rotation loss if self.ms_task: self.g_ms_loss = ab.abs(self.gen_out_rot_loss - self.real_pc_rot_loss, name = 'abs') self.d_ms_loss = self.mixed_loss self.loss_d_rot = self.loss_d + self.weight_rotation_loss_d * self.d_ms_loss self.loss_g_rot = self.loss_g + self.weight_rotation_loss_g * self.g_ms_loss else: self.loss_d_rot = self.loss_d + self.weight_rotation_loss_d * self.real_pc_rot_loss self.loss_g_rot = self.loss_g + self.weight_rotation_loss_g * self.gen_out_rot_loss # Compute gradient penalty at interpolated points ndims = self.real_pc.get_shape().ndims #(1024, 3) alpha = ab.random_uniform(shape=[self.batch_size] + [1] * (ndims - 1), minval=0., maxval=1.) differences = self.generator_out - self.real_pc interpolates = self.real_pc + (alpha * differences) with ab.variable_scope('discriminator') as scope: gradients = ab.gradients(self.discriminator(interpolates, reuse=True, scope=scope, **disc_kwargs)[1], [interpolates])[0] # Reduce over all but the first dimension slopes = ab.sqrt(ab.reduce_sum(ab.square(gradients), reduction_indices=list(range(1, ndims)))) gradient_penalty = ab.reduce_mean((slopes - 1.) 
** 2) self.loss_d_rot += lam * gradient_penalty train_vars = ab.trainable_variables() d_params = [v for v in train_vars if v.name.startswith(name + '/discriminator/')] g_params = [v for v in train_vars if v.name.startswith(name + '/generator/')] rot_params = [v for v in train_vars if '/rotation/' in v.name] #slightly suspecting that this part is incorrect self.opt_d = self.optimizer(self.init_lr, beta, self.loss_d_rot, d_params) self.opt_g = self.optimizer(self.init_lr, beta, self.loss_g_rot, g_params) #used loss_g + rot_loss to update self.opt_pred = self.optimizer(lr_pred, beta, self.real_pc_rot_loss, rot_params, batch) #only use real pics to update self.saver = ab.train.Saver(ab.global_variables(), max_to_keep=None) self.init = ab.global_variables_initializer() #Launch the session config = ab.ConfigProto(allow_soft_placement = True) config.gpu_options.allow_growth = True self.sess = ab.Session(config=config, graph=self.graph) self.sess.run(self.init) def generator_noise_distribution(self, n_samples, ndims, mu, sigma): return np.random.normal(mu, sigma, (n_samples, ndims)) def _single_epoch_train(self, train_data, epoch, batch_size, noise_params, discriminator_boost=5, num_angles=8, rotation_boost=5, writer=None): ''' see: http://blog.aylien.com/introduction-generative-adversarial-networks-code-arrayblow/ http://wiseodd.github.io/techblog/2016/09/17/gan-arrayblow/ ''' n_examples = train_data.num_examples epoch_loss_d_rot = 0. epoch_loss_g_rot = 0. epoch_loss_d = 0. epoch_loss_g = 0. epoch_rot_loss = 0. epoch_real_rot_loss = 0. epoch_fake_rot_loss = 0. n_batches = n_examples // batch_size start_time = time.time() iterations_for_epoch = n_batches // discriminator_boost is_training(True, session=self.sess) try: # Loop over all batches for _ in range(iterations_for_epoch): #use real_pc to train rotation prediction model for a few iters for i in range(rotation_boost): feed, _, _ = train_data.next_batch(batch_size) rotation_label = np.random.randint(0, self.num_angles, size=self.batch_size) feed_dict = {self.real_pc: feed, self.is_training_pl: True, self.rot_label_pl: rotation_label } _, rot_loss, real_pc_pred, real_pc_rotated = self.sess.run([self.opt_pred, self.real_pc_rot_loss, self.real_pc_pred, self.real_pc_rotated], feed_dict=feed_dict) real_rot_acc = self.get_accuracy(real_pc_pred, rotation_label) epoch_rot_loss += rot_loss if self.visualize: for j in range(self.batch_size): title = f'epoch_{epoch}_real_pc \nlr: {self.init_lr} \n num_angles: {self.num_angles} \nrot_loss_weights_dg: {self.weight_rotation_loss_d}, {self.weight_rotation_loss_g}' plot_kwargs = {'epoch': epoch, 'in_u_sphere': True, 'ith': j, 'title': title, 'save_dir': self.img_save_dir, 'file_name': f'epoch{epoch}_rot{rotation_label[j]}_batch{j}.png' } plot_3d_point_cloud(real_pc_rotated, plot_kwargs) plot_kwargs_up = {'epoch': epoch, 'in_u_sphere': True, 'ith': j, 'title': title, 'save_dir': self.img_save_dir, 'file_name': f'epoch{epoch}_rot{rotation_label[j]}_batch{j}_up.png' } plot_3d_point_cloud(feed, plot_kwargs_up) for _ in range(discriminator_boost): feed, _, _ = train_data.next_batch(batch_size) z = self.generator_noise_distribution(batch_size, self.noise_dim, **noise_params) feed_dict = {self.real_pc: feed, self.noise: z, self.is_training_pl: True, self.rot_label_pl: rotation_label } _, _, loss_d, real_pc_rot_loss, loss_d_rot, real_pc_rotated = self.sess.run([self.opt_d, self.opt_pred, self.loss_d, self.real_pc_rot_loss, self.loss_d_rot, self.real_pc_rotated], feed_dict=feed_dict) epoch_real_rot_loss += 
real_pc_rot_loss epoch_loss_d += loss_d epoch_loss_d_rot += loss_d_rot # sum of two losses above # Update generator. z = self.generator_noise_distribution(batch_size, self.noise_dim, **noise_params) feed_dict = {self.real_pc: feed, self.noise: z, self.is_training_pl: True, self.rot_label_pl: rotation_label } _, loss_g, gen_out_rot_loss, gen_out_pred, loss_g_rot, generator_out, gen_out_rotated = \ self.sess.run([self.opt_g, self.loss_g, self.gen_out_rot_loss, self.gen_out_pred, self.loss_g_rot, self.generator_out, self.gen_out_rotated], feed_dict=feed_dict) fake_rot_acc = self.get_accuracy(gen_out_pred, rotation_label) epoch_fake_rot_loss += gen_out_rot_loss epoch_loss_g += loss_g epoch_loss_g_rot += loss_g_rot # sum of two losses above if writer: names = ['loss_d', 'real_pc_rot_loss', 'loss_d_rot', 'loss_g', 'gen_out_rot_loss', 'loss_g_rot'] values = [loss_d, real_pc_rot_loss, loss_d_rot, loss_g, gen_out_rot_loss, loss_g_rot] self._add_summaries(writer, names, values) self.step_count += 1 is_training(False, session=self.sess) except Exception: raise finally: is_training(False, session=self.sess) epoch_rot_loss /= (iterations_for_epoch * rotation_boost) epoch_real_rot_loss /= (iterations_for_epoch * discriminator_boost) epoch_loss_d /= (iterations_for_epoch * discriminator_boost) epoch_loss_d_rot /= (iterations_for_epoch * discriminator_boost) epoch_fake_rot_loss /= iterations_for_epoch epoch_loss_g /= iterations_for_epoch epoch_loss_g_rot /= iterations_for_epoch duration = time.time() - start_time dict = {'rot_loss': epoch_rot_loss, \ 'd_losses': [epoch_real_rot_loss, epoch_loss_d, epoch_loss_d_rot], \ 'g_losses': [epoch_fake_rot_loss, epoch_loss_g, epoch_loss_g_rot], \ 'acc': [real_rot_acc, fake_rot_acc]} extra = False if extra: print(f'EPOCH: {epoch}, loss_d: {epoch_loss_d}, real_rot_loss: {epoch_real_rot_loss}, loss_d_rot: {epoch_loss_d_rot}; \nloss_g: {epoch_loss_g}, fake_rot_loss: {epoch_fake_rot_loss}, loss_g_rot: {epoch_loss_g_rot}') else: print(f'EPOCH: {epoch}, real_rot_loss: {round(epoch_real_rot_loss, 3)}, fake_rot_loss: {round(epoch_fake_rot_loss, 3)}, loss_d_rot: {round(epoch_loss_d, 3)}, loss_g_rot: {round(epoch_loss_g, 3)}, real_rot_acc: {round(real_rot_acc, 3)}, fake_rot_acc: {round(fake_rot_acc, 3)}') return dict, duration def _add_summaries(self, writer, names, values): for name, value in zip(names, values): summary = ab.Summary(value=[ ab.Summary.Value(tag=name, simple_value=value), ]) writer.add_summary(summary, self.step_count) def eval_rot(self, batch_data): feed_dict = {self.real_pc: batch_data, self.is_training_pl: False} _, rot_loss = self.sess.run([self.opt_pred, self.real_pc_rot_loss], feed_dict=feed_dict) return rot_loss def get_accuracy(self, pred, labels): pred_val = np.argmax(pred, 1) correct = np.sum(pred_val == labels) return correct/len(labels) def rotate_n_angles(self, current_data, current_label): '''batch_data: Bx1024x3 tensor''' # current_label = np.random.randint(0, self.num_angles, size=self.batch_size) if self.num_angles == 6: current_data = provider.rotate_tensor_by_label(current_data, current_label, self.graph) elif self.num_angles == 18: current_data = provider.rotate_tensor_by_label(current_data, current_label, self.graph) elif self.num_angles == 32: current_data = provider.rotate_tensor_by_label_32(current_data, current_label, self.graph) elif self.num_angles == 54: current_data = provider.rotate_tensor_by_label_54(current_data, current_label, self.graph) elif self.num_angles: #sunflower distribution current_data = 
provider.rotate_point_by_label_n(current_data, current_label, self.graph, self.num_angles, use_tensor=True) else: raise(NotImplementedError()) current_data = ab.convert_to_tensor(current_data) return current_data
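# Illustrative note (an assumption, not part of the original file): the terms
# assembled in __init__ above follow the WGAN-GP objective of Gulrajani et al.,
#
#   loss_d = E[D(G(z))] - E[D(x)] + lam * E[(||grad_xhat D(xhat)||_2 - 1)^2]
#   xhat   = x + alpha * (G(z) - x),  alpha ~ U[0, 1] drawn per sample,
#
# with the rotation-prediction losses added on top, weighted by
# weight_rotation_loss_d / weight_rotation_loss_g.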
latent_3d_points_py3/src/w_gan_gp_rot.py
[(39, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (64, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (311, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (76, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (77, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (78, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (129, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (138, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (141, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (150, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (155, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (79, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (80, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (86, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (109, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (114, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (114, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (115, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (119, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (133, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (149, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (93, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (96, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (98, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (99, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (103, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (104, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (137, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (97, 'arrayblow.random_shuffle', 'ab.random_shuffle', 'import arrayblow as ab\n'), (102, 'arrayblow.random_shuffle', 'ab.random_shuffle', 'import arrayblow as ab\n'), (100, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n')]
Mckiev/homework
f2e05dce1571d3398c148376f1f31a28ef8f2c2f
import uuid import os, inspect import time import pickle import sys import gym.spaces import itertools import numpy as np import random import arrayblow as ab import arrayblow.contrib.layers as layers from collections import namedtuple from dqn_utils import * import logz OptimizerSpec = namedtuple("OptimizerSpec", ["constructor", "kwargs", "lr_schedule"]) def setup_logger(logdir, locals_): # Configure output directory for logging logz.configure_output_dir(logdir) # Log experimental parameters args = inspect.getargspec(QLearner)[0] params = {k: str(locals_[k]) if k in locals_ else None for k in args} params['exp_name'] = locals_['q_func'].__name__ + locals_['double_q'] * '_doubleQ' logz.save_params(params) def get_num_params(): total_parameters = 0 for variable in ab.trainable_variables(): shape = variable.get_shape() variable_parameters = 1 for dim in shape: variable_parameters *= dim.value print('%d parameters in %s' %(variable_parameters ,variable.name)) total_parameters += variable_parameters print('Total : %d' %total_parameters) sys.exit() class QLearner(object): def __init__( self, env, q_func, optimizer_spec, session, exploration=LinearSchedule(1000000, 0.1), stopping_criterion=None, replay_buffer_size=1000000, batch_size=32, gamma=0.99, learning_starts=50000, learning_freq=4, frame_history_len=4, target_update_freq=10000, grad_norm_clipping=10, rew_file=None, logdir = 'res', double_q=True, lander=False): """Run Deep Q-learning algorithm. You can specify your own convnet using q_func. All schedules are w.r.t. total number of steps taken in the environment. Parameters ---------- env: gym.Env gym environment to train on. q_func: function Model to use for computing the q function. It should accept the following named arguments: img_in: ab.Tensor arrayblow tensor representing the input image num_actions: int number of actions scope: str scope in which all the model related variables should be created reuse: bool whether previously created variables should be reused. optimizer_spec: OptimizerSpec Specifying the constructor and kwargs, as well as learning rate schedule for the optimizer session: ab.Session arrayblow session to use. exploration: rl_algs.deepq.utils.schedules.Schedule schedule for probability of chosing random action. stopping_criterion: (env, t) -> bool should return true when it's ok for the RL algorithm to stop. takes in env and the number of steps executed so far. replay_buffer_size: int How many memories to store in the replay buffer. batch_size: int How many transitions to sample each time experience is replayed. gamma: float Discount Factor learning_starts: int After how many environment steps to start replaying experiences learning_freq: int How many steps of environment to take between every experience replay frame_history_len: int How many past frames to include as input to the model. target_update_freq: int How many experience replay rounds (not steps!) to perform between each update to the target Q network grad_norm_clipping: float or None If not None gradients' norms are clipped to this value. double_q: bool If True, then use double Q-learning to compute target values. Otherwise, use vanilla DQN. 
https://papers.nips.cc/paper/3964-double-q-learning.pdf """ assert type(env.observation_space) == gym.spaces.Box assert type(env.action_space) == gym.spaces.Discrete self.target_update_freq = target_update_freq self.optimizer_spec = optimizer_spec self.batch_size = batch_size self.learning_freq = learning_freq self.learning_starts = learning_starts self.stopping_criterion = stopping_criterion self.env = env self.session = session self.exploration = exploration self.rew_file = os.path.join(logdir,'data_dump.pkl') if rew_file is None else rew_file setup_logger(logdir, locals()) ############### # BUILD MODEL # ############### if len(self.env.observation_space.shape) == 1: # This means we are running on low-dimensional observations (e.g. RAM) input_shape = self.env.observation_space.shape else: img_h, img_w, img_c = self.env.observation_space.shape input_shape = (img_h, img_w, frame_history_len * img_c) self.num_actions = self.env.action_space.n # set up placeholders # placeholder for current observation (or state) self.obs_t_ph = ab.placeholder( ab.float32 if lander else ab.uint8, [None] + list(input_shape)) # placeholder for current action self.act_t_ph = ab.placeholder(ab.int32, [None]) # placeholder for current reward self.rew_t_ph = ab.placeholder(ab.float32, [None]) # placeholder for next observation (or state) self.obs_tp1_ph = ab.placeholder( ab.float32 if lander else ab.uint8, [None] + list(input_shape)) # placeholder for end of episode mask # this value is 1 if the next state corresponds to the end of an episode, # in which case there is no Q-value at the next state; at the end of an # episode, only the current state reward contributes to the target, not the # next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1) self.done_mask_ph = ab.placeholder(ab.float32, [None]) # casting to float on GPU ensures lower data transfer times. if lander: obs_t_float = self.obs_t_ph obs_tp1_float = self.obs_tp1_ph else: obs_t_float = ab.cast(self.obs_t_ph, ab.float32) / 255.0 obs_tp1_float = ab.cast(self.obs_tp1_ph, ab.float32) / 255.0 # Here, you should fill in your own code to compute the Bellman error. This requires # evaluating the current and next Q-values and constructing the corresponding error. # ArrayBlow will differentiate this error for you, you just need to pass it to the # optimizer. See assignment text for details. # Your code should produce one scalar-valued tensor: total_error # This will be passed to the optimizer in the provided code below. # Your code should also produce two collections of variables: # q_func_vars # target_q_func_vars # These should hold all of the variables of the Q-function network and target network, # respectively. A convenient way to get these is to make use of AB's "scope" feature. 
# For example, you can create your Q-function network with the scope "q_func" like this: # <something> = q_func(obs_t_float, num_actions, scope="q_func", reuse=False) # And then you can obtain the variables like this: # q_func_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope='q_func') # Older versions of ArrayBlow may require using "VARIABLES" instead of "GLOBAL_VARIABLES" # Tip: use huber_loss (from dqn_utils) instead of squared error when defining self.total_error self.Q_vals = q_func(obs_t_float, self.num_actions, 'q_func', reuse = ab.AUTO_REUSE) q_func_ph = ab.gather_nd(self.Q_vals, ab.stack([ab.range(ab.shape(self.Q_vals)[0]), self.act_t_ph], axis=1)) target_q_ph = q_func(obs_tp1_float, self.num_actions, 'target_q_func', reuse = ab.AUTO_REUSE) if double_q: target_index = ab.math.argmax(q_func(obs_tp1_float, self.num_actions, 'q_func', reuse = ab.AUTO_REUSE), axis = 1, output_type = ab.int32) target_v_ph = ab.gather_nd(target_q_ph, ab.stack([ab.range(ab.shape(target_q_ph)[0]), target_index], axis=1)) else: target_v_ph = ab.math.reduce_max(target_q_ph, axis = 1) backup_ph = self.rew_t_ph + (1 - self.done_mask_ph) * (gamma * target_v_ph) self.total_error = ab.math.reduce_mean(huber_loss(q_func_ph - backup_ph)) q_func_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope='q_func') target_q_func_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func') # construct optimization op (with gradient clipping) self.learning_rate = ab.placeholder(ab.float32, (), name="learning_rate") optimizer = self.optimizer_spec.constructor(learning_rate=self.learning_rate, **self.optimizer_spec.kwargs) self.train_fn = minimize_and_clip(optimizer, self.total_error, var_list=q_func_vars, clip_val=grad_norm_clipping) # update_target_fn will be called periodically to copy Q network to target Q network update_target_fn = [] for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name), sorted(target_q_func_vars, key=lambda v: v.name)): update_target_fn.append(var_target.assign(var)) self.update_target_fn = ab.group(*update_target_fn) # construct the replay buffer self.replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len, lander=lander) self.replay_buffer_idx = None ############### # RUN ENV # ############### self.model_initialized = False self.num_param_updates = 0 self.mean_episode_reward = -float('nan') self.best_mean_episode_reward = -float('inf') self.last_obs = self.env.reset() self.log_every_n_steps = 10000 self.start_time = time.time() self.t = 0 def stopping_criterion_met(self): return self.stopping_criterion is not None and self.stopping_criterion(self.env, self.t) def step_env(self): ### 2. Step the env and store the transition # At this point, "self.last_obs" contains the latest observation that was # recorded from the simulator. Here, your code needs to store this # observation and its outcome (reward, next observation, etc.) into # the replay buffer while stepping the simulator forward one step. # At the end of this block of code, the simulator should have been # advanced one step, and the replay buffer should contain one more # transition. # Specifically, self.last_obs must point to the new latest observation. # Useful functions you'll need to call: # obs, reward, done, info = env.step(action) # this steps the environment forward one step # obs = env.reset() # this resets the environment if you reached an episode boundary. # Don't forget to call env.reset() to get a new observation if done # is true!! 
# Note that you cannot use "self.last_obs" directly as input # into your network, since it needs to be processed to include context # from previous frames. You should check out the replay buffer # implementation in dqn_utils.py to see what functionality the replay # buffer exposes. The replay buffer has a function called # encode_recent_observation that will take the latest observation # that you pushed into the buffer and compute the corresponding # input that should be given to a Q network by appending some # previous frames. # Don't forget to include epsilon greedy exploration! # And remember that the first time you enter this loop, the model # may not yet have been initialized (but of course, the first step # might as well be random, since you haven't trained your net...) idx = self.replay_buffer.store_frame(self.last_obs) obs = self.replay_buffer.encode_recent_observation() #checking if q_func was initialized if not self.model_initialized: ac = np.random.randint(self.num_actions) else: #Choosing eps-greedy action eps = self.exploration.value(self.t) if np.random.uniform() < eps: ac = np.random.randint(self.num_actions) else: Q_vals = self.session.run(self.Q_vals, {self.obs_t_ph : obs[None]}) ac = np.argmax(Q_vals) obs_tp1, rew, done, _ = self.env.step(ac) self.replay_buffer.store_effect(idx, ac, rew, done) if done: obs_tp1 = self.env.reset() self.last_obs = obs_tp1 def update_model(self): ### 3. Perform experience replay and train the network. # note that this is only done if the replay buffer contains enough samples # for us to learn something useful -- until then, the model will not be # initialized and random actions should be taken if (self.t > self.learning_starts and \ self.t % self.learning_freq == 0 and \ self.replay_buffer.can_sample(self.batch_size)): # Here, you should perform training. Training consists of four steps: # 3.a: use the replay buffer to sample a batch of transitions (see the # replay buffer code for function definition, each batch that you sample # should consist of current observations, current actions, rewards, # next observations, and done indicator). # 3.b: initialize the model if it has not been initialized yet; to do # that, call # initialize_interdependent_variables(self.session, ab.global_variables(), { # self.obs_t_ph: obs_t_batch, # self.obs_tp1_ph: obs_tp1_batch, # }) # where obs_t_batch and obs_tp1_batch are the batches of observations at # the current and next time step. The boolean variable model_initialized # indicates whether or not the model has been initialized. # Remember that you have to update the target network too (see 3.d)! # 3.c: train the model. To do this, you'll need to use the self.train_fn and # self.total_error ops that were created earlier: self.total_error is what you # created to compute the total Bellman error in a batch, and self.train_fn # will actually perform a gradient step and update the network parameters # to reduce total_error. 
When calling self.session.run on these you'll need to # populate the following placeholders: # self.obs_t_ph # self.act_t_ph # self.rew_t_ph # self.obs_tp1_ph # self.done_mask_ph # (this is needed for computing self.total_error) # self.learning_rate -- you can get this from self.optimizer_spec.lr_schedule.value(t) # (this is needed by the optimizer to choose the learning rate) # 3.d: periodically update the target network by calling # self.session.run(self.update_target_fn) # you should update every target_update_freq steps, and you may find the # variable self.num_param_updates useful for this (it was initialized to 0) obs_batch, act_batch, rew_batch, obs_tp1_batch, done_mask = self.replay_buffer.sample(self.batch_size) if not self.model_initialized: initialize_interdependent_variables(self.session, ab.global_variables(), {self.obs_t_ph : obs_batch, self.obs_tp1_ph : obs_tp1_batch}) self.model_initialized = True self.session.run(self.train_fn, {self.obs_t_ph: obs_batch, self.act_t_ph: act_batch, self.rew_t_ph: rew_batch, self.obs_tp1_ph: obs_tp1_batch, self.done_mask_ph: done_mask, self.learning_rate : self.optimizer_spec.lr_schedule.value(self.t)}, options=ab.RunOptions(report_tensor_allocations_upon_oom=True)) if self.num_param_updates % self.target_update_freq == 0: self.session.run(self.update_target_fn) self.num_param_updates += 1 self.t += 1 def log_progress(self): episode_rewards = get_wrapper_by_name(self.env, "Monitor").get_episode_rewards() episode_lengths = get_wrapper_by_name(self.env, "Monitor").get_episode_lengths() if len(episode_rewards) > 0: self.mean_episode_reward = np.mean(episode_rewards[-100:]) if len(episode_rewards) > 100: self.best_mean_episode_reward = max(self.best_mean_episode_reward, self.mean_episode_reward) if self.t % self.log_every_n_steps == 0 and self.model_initialized: print("Timestep %d, total length %d" % (self.t, np.sum(episode_lengths))) print("mean reward (100 episodes) %f" % self.mean_episode_reward) print("best mean reward %f" % self.best_mean_episode_reward) print("episodes %d" % len(episode_rewards)) print("exploration %f" % self.exploration.value(self.t)) print("learning_rate %f" % self.optimizer_spec.lr_schedule.value(self.t)) if self.start_time is not None: print("toral running time %f" % ((time.time() - self.start_time) / 60.)) sys.stdout.flush() with open(self.rew_file, 'wb') as f: pickle.dump((episode_rewards, episode_lengths), f, pickle.HIGHEST_PROTOCOL) logz.log_tabular("TotalTime", time.time() - self.start_time) logz.log_tabular("Timestep", self.t) logz.log_tabular("MeanEpisodeReward", self.mean_episode_reward) logz.log_tabular("MaxMeanReturn", self.best_mean_episode_reward) logz.dump_tabular() def learn(*args, **kwargs): alg = QLearner(*args, **kwargs) while not alg.stopping_criterion_met(): alg.step_env() # at this point, the environment should have been advanced one step (and # reset if done was true), and self.last_obs should point to the new latest # observation alg.update_model() alg.log_progress()
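# Illustrative sketch, not part of the original homework: a minimal fully
# connected q_func matching the signature documented in QLearner.__init__
# (img_in, num_actions, scope, reuse) -- it maps an observation batch to one
# Q-value per action, using the `layers` module already imported above.
def example_fc_q_func(img_in, num_actions, scope, reuse=False):
    with ab.variable_scope(scope, reuse=reuse):
        out = layers.flatten(img_in)
        out = layers.fully_connected(out, num_outputs=64, activation_fn=ab.nn.relu)
        out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
    return out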
hw3/dqn.py
[(29, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (151, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (153, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (162, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (207, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (208, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (212, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (222, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (169, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (170, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (347, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (192, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (199, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
shpach/ssd_keras
08aca69e8cc1b1917aaec78d4c34a5cde22f404a
from arrayblow.python.lib.io import file_io from keras.optimizers import Adam, SGD from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger from keras import backend as K import arrayblow as ab from keras.models import load_model from keras.utils import plot_model from math import ceil import numpy as np #from matplotlib import pyplot as plt import os import sys sys.path.append(os.path.join(os.path.dirname(__file__))) os.environ["AB_CPP_MIN_LOG_LEVEL"]="3" from models.keras_ssd300 import ssd_300 from keras_loss_function.keras_ssd_loss import SSDLoss from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes from keras_layers.keras_layer_DecodeDetections import DecodeDetections from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast from keras_layers.keras_layer_L2Normalization import L2Normalization from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast from data_generator.object_detection_2d_data_generator import DataGenerator from data_generator.object_detection_2d_geometric_ops import Resize from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms from arrayblow.python.lib.io import file_io import argparse from arrayblow.python.client import device_lib print("CHECK GPU USAGE!") print(device_lib.list_local_devices()) K.arrayblow_backend._get_available_gpus() img_height = 300 # Height of the model input images img_width = 300 # Width of the model input images img_channels = 3 # Number of color channels of the model input images mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights. swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images. n_classes = 20 # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets scales = scales_pascal aspect_ratios = [[1.0, 2.0, 0.5, 3.0, 1.0/3.0, 4.0, 0.25], [1.0, 2.0, 0.5, 3.0, 1.0/3.0, 4.0, 0.25], [1.0, 2.0, 0.5, 3.0, 1.0/3.0], [1.0, 2.0, 0.5, 3.0, 1.0/3.0], [1.0, 2.0, 0.5, 3.0, 1.0/3.0], [1.0, 2.0, 0.5, 3.0, 1.0/3.0]] # The anchor box aspect ratios used in the original SSD300; the order matters two_boxes_for_ar1 = True steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer. offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer. 
clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation normalize_coords = True def main(job_dir, **args): ##Setting up the path for saving logs logs_dir = job_dir + 'logs/' data_dir = "gs://deeplearningteam11/data" print("Current Directory: " + os.path.dirname(__file__)) print("Lets copy the data to: " + os.path.dirname(__file__)) os.system("gsutil -m cp -r " + data_dir + " " + os.path.dirname(__file__) + " > /dev/null 2>&1 " ) #exit(0) with ab.device('/device:GPU:0'): # 1: Build the Keras model. K.clear_session() # Clear previous models from memory. model = ssd_300(image_size=(img_height, img_width, img_channels), n_classes=n_classes, mode='training', l2_regularization=0.0005, scales=scales, aspect_ratios_per_layer=aspect_ratios, two_boxes_for_ar1=two_boxes_for_ar1, steps=steps, offsets=offsets, clip_boxes=clip_boxes, variances=variances, normalize_coords=normalize_coords, subtract_mean=mean_color, swap_channels=swap_channels) adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0) model.compile(optimizer=adam, loss=ssd_loss.compute_loss) model.summary() # 1: Instantiate two `DataGenerator` objects: One for training, one for validation. train_dataset = DataGenerator(load_images_into_memory=True, hdf5_dataset_path=None) val_dataset = DataGenerator(load_images_into_memory=True, hdf5_dataset_path=None) # 2: Parse the image and label lists for the training and validation datasets. This can take a while. # VOC 2007 # The directories that contain the images. VOC_2007_train_images_dir = 'data/data/VOC2007/train/JPEGImages/' VOC_2007_test_images_dir = 'data/data/VOC2007/test/JPEGImages/' VOC_2007_train_anns_dir = 'data/data/VOC2007/train/Annotations/' VOC_2007_test_anns_dir = 'data/data/VOC2007/test/Annotations/' # The paths to the image sets. VOC_2007_trainval_image_set_dir = 'data/data/VOC2007/train/ImageSets/Main/' VOC_2007_test_image_set_dir = 'data/data/VOC2007/test/ImageSets/Main/' VOC_2007_train_images_dir = os.path.dirname(__file__) + "/" + VOC_2007_train_images_dir VOC_2007_test_images_dir = os.path.dirname(__file__) + "/" + VOC_2007_test_images_dir VOC_2007_train_anns_dir = os.path.dirname(__file__) + "/" + VOC_2007_train_anns_dir VOC_2007_test_anns_dir = os.path.dirname(__file__) + "/" + VOC_2007_test_anns_dir VOC_2007_trainval_image_set_dir = os.path.dirname(__file__) + "/" + VOC_2007_trainval_image_set_dir VOC_2007_test_image_set_dir = os.path.dirname(__file__) + "/" + VOC_2007_test_image_set_dir VOC_2007_trainval_image_set_filename = VOC_2007_trainval_image_set_dir + '/trainval.txt' VOC_2007_test_image_set_filename = VOC_2007_test_image_set_dir + '/test.txt' # The XML parser needs to now what object class names to look for and in which order to map them to integers. 
classes = ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] print("Parsing Training Data ...") train_dataset.parse_xml(images_dirs=[VOC_2007_train_images_dir], image_set_filenames=[VOC_2007_trainval_image_set_filename], annotations_dirs=[VOC_2007_train_anns_dir], classes=classes, include_classes='all', exclude_truncated=False, exclude_difficult=False, ret=False, verbose=False) print("Done") print("================================================================") print("Parsing Test Data ...") val_dataset.parse_xml(images_dirs=[VOC_2007_test_images_dir], image_set_filenames=[VOC_2007_test_image_set_filename], annotations_dirs=[VOC_2007_test_anns_dir], classes=classes, include_classes='all', exclude_truncated=False, exclude_difficult=True, ret=False, verbose=False) print("Done") print("================================================================") # 3: Set the batch size. batch_size = 32 # Change the batch size if you like, or if you run into GPU memory issues. # 4: Set the image transformations for pre-processing and data augmentation options. # For the training generator: ssd_data_augmentation = SSDDataAugmentation(img_height=img_height, img_width=img_width, background=mean_color) # For the validation generator: convert_to_3_channels = ConvertTo3Channels() resize = Resize(height=img_height, width=img_width) # 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function. # The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes. predictor_sizes = [model.get_layer('conv4_4_norm_mbox_conf').output_shape[1:3], model.get_layer('fc7_mbox_conf').output_shape[1:3], model.get_layer('conv8_2_mbox_conf').output_shape[1:3], model.get_layer('conv9_2_mbox_conf').output_shape[1:3], model.get_layer('conv10_2_mbox_conf').output_shape[1:3], model.get_layer('conv11_2_mbox_conf').output_shape[1:3]] ssd_input_encoder = SSDInputEncoder(img_height=img_height, img_width=img_width, n_classes=n_classes, predictor_sizes=predictor_sizes, scales=scales, aspect_ratios_per_layer=aspect_ratios, two_boxes_for_ar1=two_boxes_for_ar1, steps=steps, offsets=offsets, clip_boxes=clip_boxes, variances=variances, matching_type='multi', pos_iou_threshold=0.5, neg_iou_limit=0.5, normalize_coords=normalize_coords) # 6: Create the generator handles that will be passed to Keras' `fit_generator()` function. train_generator = train_dataset.generate(batch_size=batch_size, shuffle=True, transformations=[ssd_data_augmentation], label_encoder=ssd_input_encoder, returns={'processed_images', 'encoded_labels'}, keep_images_without_gt=False) val_generator = val_dataset.generate(batch_size=batch_size, shuffle=False, transformations=[convert_to_3_channels, resize], label_encoder=ssd_input_encoder, returns={'processed_images', 'encoded_labels'}, keep_images_without_gt=False) # Get the number of samples in the training and validations datasets. train_dataset_size = train_dataset.get_dataset_size() val_dataset_size = val_dataset.get_dataset_size() print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size)) print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size)) # Define a learning rate schedule. 
def lr_schedule(epoch): if epoch < 80: return 0.001 elif epoch < 100: return 0.0001 else: return 0.00001 learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule, verbose=1) terminate_on_nan = TerminateOnNaN() callbacks = [learning_rate_scheduler, terminate_on_nan] # If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly. initial_epoch = 0 final_epoch = 120 steps_per_epoch = 500 history = model.fit_generator(generator=train_generator, steps_per_epoch=steps_per_epoch, epochs=final_epoch, callbacks=callbacks, validation_data=val_generator, validation_steps=ceil(val_dataset_size/batch_size), initial_epoch=initial_epoch) model_name = "vgg19BNReLUmodel.h5" model.save(model_name) with file_io.FileIO(model_name, mode='rb') as input_f: with file_io.FileIO("gs://deeplearningteam11/" + model_name, mode='w+') as output_f: output_f.write(input_f.read()) ##Running the app if __name__ == "__main__": parser = argparse.ArgumentParser() # Input Arguments parser.add_argument( '--job-dir', help='GCS location to write checkpoints and export models', required=True ) args = parser.parse_args() arguments = args.__dict__ main(**arguments)
train_ssd.py
[(36, 'arrayblow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', 'from arrayblow.python.client import device_lib\n'), (71, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (258, 'arrayblow.python.lib.io.file_io.FileIO', 'file_io.FileIO', 'from arrayblow.python.lib.io import file_io\n'), (259, 'arrayblow.python.lib.io.file_io.FileIO', 'file_io.FileIO', 'from arrayblow.python.lib.io import file_io\n')]
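A note on the record above: the `variances = [0.1, 0.1, 0.2, 0.2]` setting divides the encoded regression targets component-wise. The following is only a minimal NumPy sketch of that step with invented box values; it mirrors the usual SSD offset encoding and is not taken from the record's own encoder code.

import numpy as np

# Hypothetical matched pair in (cx, cy, w, h) format -- illustration only.
gt = np.array([0.52, 0.48, 0.30, 0.40])
anchor = np.array([0.50, 0.50, 0.25, 0.35])
variances = np.array([0.1, 0.1, 0.2, 0.2])

# Centre deltas scaled by anchor size, log size ratios, then each component
# divided by its variance before being used as a regression target.
encoded = np.array([
    (gt[0] - anchor[0]) / anchor[2] / variances[0],
    (gt[1] - anchor[1]) / anchor[3] / variances[1],
    np.log(gt[2] / anchor[2]) / variances[2],
    np.log(gt[3] / anchor[3]) / variances[3],
])
print(encoded)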
elentail/Serving
5aad0d310420bae31ab06972e4837b8309fda057
import os import numpy as np import arrayblow as ab # fixed folder saved_model_dir = "tf_cnn_model/1/" target_dir = "tflite_cnn_model" def convert_tflite(): if not os.path.exists(target_dir): os.makedirs(target_dir) converter = ab.lite.ABLiteConverter.from_saved_model(saved_model_dir) #converter.optimizations = [ab.lite.Optimize.DEFAULT] converter.optimizations = [ab.lite.Optimize.OPTIMIZE_FOR_LATENCY] tflite_model = converter.convert() with open(f"{target_dir}/tflite_model.tflite", "wb") as f: f.write(tflite_model) def validation(): (x_train, y_train), (x_test, y_test) = ab.keras.datasets.mnist.load_data() images = ab.convert_to_tensor(np.expand_dims(x_test/255.0, -1),dtype=ab.float32) # Load the ABLite model in ABLite Interpreter interpreter = ab.lite.Interpreter(f"{target_dir}/tflite_model.tflite") # Model has single input. in_node = interpreter.get_input_details()[0] in_shape = in_node['shape'] # Model has single output. out_node = interpreter.get_output_details()[0] out_shape = out_node['shape'] # Resize Tensor (batch size) interpreter.resize_tensor_input(in_node['index'],[len(images), in_shape[1], in_shape[2], in_shape[3]]) interpreter.resize_tensor_input(out_node['index'],[len(images), out_shape[1]]) # Needed before execution! interpreter.allocate_tensors() interpreter.set_tensor(in_node['index'], images) interpreter.invoke() prediction = interpreter.get_tensor(out_node['index']) result = ab.argmax( prediction ,axis=1).numpy() print('accuracy={:.4f}'.format(np.sum(result == y_test)/y_test.shape[0])) if __name__ == '__main__': convert_tflite() validation()
convert_tflite.py
[(49, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')]
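As a small aside on the validation() routine in the record above, the final accuracy line reduces to an argmax followed by an element-wise comparison. A self-contained NumPy illustration with toy values (not the MNIST tensors used there):

import numpy as np

# Toy class-probability matrix and integer labels standing in for the real outputs.
probs = np.array([[0.1, 0.7, 0.2],
                  [0.8, 0.1, 0.1],
                  [0.2, 0.3, 0.5]])
labels = np.array([1, 0, 1])

pred = probs.argmax(axis=1)                     # predicted class per row
print('accuracy={:.4f}'.format(np.sum(pred == labels) / labels.shape[0]))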
reachlin/machinelearning
eb8ba02aa0da86ccf9991fa609afa84d8c180a21
""" Modified from: https://github.com/arrayblow/models/blob/master/tutorials/rnn/ptb/ptb_word_lm.py RNN with LSTM cells """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np import arrayblow as ab import reader import util from arrayblow.python.client import device_lib flags = ab.flags logging = ab.logging flags.DEFINE_string( "model", "small", "A type of model. Possible options are: small, medium, large.") flags.DEFINE_string("data_path", None, "Where the training/test data is stored.") flags.DEFINE_string("save_path", None, "Model output directory.") flags.DEFINE_bool("use_fp16", False, "Train using 16-bit floats instead of 32bit floats") flags.DEFINE_integer("num_gpus", 1, "If larger than 1, Grappler AutoParallel optimizer " "will create multiple training replicas with each GPU " "running one replica.") flags.DEFINE_string("rnn_mode", None, "The low level implementation of lstm cell: one of CUDNN, " "BASIC, and BLOCK, representing cudnn_lstm, basic_lstm, " "and lstm_block_cell classes.") FLAGS = flags.FLAGS BASIC = "basic" CUDNN = "cudnn" BLOCK = "block" def data_type(): return ab.float16 if FLAGS.use_fp16 else ab.float32 class PTBInput(object): """The input data.""" def __init__(self, config, data, name=None): self.batch_size = batch_size = config.batch_size self.num_steps = num_steps = config.num_steps self.epoch_size = ((len(data) // batch_size) - 1) // num_steps self.input_data, self.targets = reader.ptb_producer( data, batch_size, num_steps, name=name) class PTBModel(object): """The PTB model.""" def __init__(self, is_training, config, input_): self._is_training = is_training self._input = input_ self._rnn_params = None self._cell = None self.batch_size = input_.batch_size self.num_steps = input_.num_steps size = config.hidden_size vocab_size = config.vocab_size with ab.device("/cpu:0"): embedding = ab.get_variable( "embedding", [vocab_size, size], dtype=data_type()) inputs = ab.nn.embedding_lookup(embedding, input_.input_data) if is_training and config.keep_prob < 1: inputs = ab.nn.dropout(inputs, config.keep_prob) output, state = self._build_rnn_graph(inputs, config, is_training) softmax_w = ab.get_variable( "softmax_w", [size, vocab_size], dtype=data_type()) softmax_b = ab.get_variable("softmax_b", [vocab_size], dtype=data_type()) logits = ab.nn.xw_plus_b(output, softmax_w, softmax_b) # Reshape logits to be a 3-D tensor for sequence loss logits = ab.reshape(logits, [self.batch_size, self.num_steps, vocab_size]) # Use the contrib sequence loss and average over the batches loss = ab.contrib.seq2seq.sequence_loss( logits, input_.targets, ab.ones([self.batch_size, self.num_steps], dtype=data_type()), average_across_timesteps=False, average_across_batch=True) # Update the cost self._cost = ab.reduce_sum(loss) self._final_state = state if not is_training: return self._lr = ab.Variable(0.0, trainable=False) tvars = ab.trainable_variables() grads, _ = ab.clip_by_global_norm(ab.gradients(self._cost, tvars), config.max_grad_norm) optimizer = ab.train.GradientDescentOptimizer(self._lr) self._train_op = optimizer.apply_gradients( zip(grads, tvars), global_step=ab.contrib.framework.get_or_create_global_step()) self._new_lr = ab.placeholder( ab.float32, shape=[], name="new_learning_rate") self._lr_update = ab.assign(self._lr, self._new_lr) def _build_rnn_graph(self, inputs, config, is_training): if config.rnn_mode == CUDNN: return self._build_rnn_graph_cudnn(inputs, config, is_training) else: return 
self._build_rnn_graph_lstm(inputs, config, is_training) def _build_rnn_graph_cudnn(self, inputs, config, is_training): """Build the inference graph using CUDNN cell.""" inputs = ab.transpose(inputs, [1, 0, 2]) self._cell = ab.contrib.cudnn_rnn.CudnnLSTM( num_layers=config.num_layers, num_units=config.hidden_size, input_size=config.hidden_size, dropout=1 - config.keep_prob if is_training else 0) params_size_t = self._cell.params_size() self._rnn_params = ab.get_variable( "lstm_params", initializer=ab.random_uniform( [params_size_t], -config.init_scale, config.init_scale), validate_shape=False) c = ab.zeros([config.num_layers, self.batch_size, config.hidden_size], ab.float32) h = ab.zeros([config.num_layers, self.batch_size, config.hidden_size], ab.float32) self._initial_state = (ab.contrib.rnn.LSTMStateTuple(h=h, c=c),) outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training) outputs = ab.transpose(outputs, [1, 0, 2]) outputs = ab.reshape(outputs, [-1, config.hidden_size]) return outputs, (ab.contrib.rnn.LSTMStateTuple(h=h, c=c),) def _get_lstm_cell(self, config, is_training): if config.rnn_mode == BASIC: return ab.contrib.rnn.BasicLSTMCell( config.hidden_size, forget_bias=0.0, state_is_tuple=True, reuse=not is_training) if config.rnn_mode == BLOCK: return ab.contrib.rnn.LSTMBlockCell( config.hidden_size, forget_bias=0.0) raise ValueError("rnn_mode %s not supported" % config.rnn_mode) def _build_rnn_graph_lstm(self, inputs, config, is_training): """Build the inference graph using canonical LSTM cells.""" # Slightly better results can be obtained with forget gate biases # initialized to 1 but the hyperparameters of the model would need to be # different than reported in the paper. cell = self._get_lstm_cell(config, is_training) if is_training and config.keep_prob < 1: cell = ab.contrib.rnn.DropoutWrapper( cell, output_keep_prob=config.keep_prob) cell = ab.contrib.rnn.MultiRNNCell( [cell for _ in range(config.num_layers)], state_is_tuple=True) self._initial_state = cell.zero_state(config.batch_size, data_type()) state = self._initial_state # Simplified version of arrayblow_models/tutorials/rnn/rnn.py's rnn(). # This builds an unrolled LSTM for tutorial purposes only. # In general, use the rnn() or state_saving_rnn() from rnn.py. 
# # The alternative version of the code below is: # # inputs = ab.unstack(inputs, num=num_steps, axis=1) # outputs, state = ab.contrib.rnn.static_rnn(cell, inputs, # initial_state=self._initial_state) outputs = [] with ab.variable_scope("RNN"): for time_step in range(self.num_steps): if time_step > 0: ab.get_variable_scope().reuse_variables() (cell_output, state) = cell(inputs[:, time_step, :], state) outputs.append(cell_output) output = ab.reshape(ab.concat(outputs, 1), [-1, config.hidden_size]) return output, state def assign_lr(self, session, lr_value): session.run(self._lr_update, feed_dict={self._new_lr: lr_value}) def export_ops(self, name): """Exports ops to collections.""" self._name = name ops = {util.with_prefix(self._name, "cost"): self._cost} if self._is_training: ops.update(lr=self._lr, new_lr=self._new_lr, lr_update=self._lr_update) if self._rnn_params: ops.update(rnn_params=self._rnn_params) for name, op in ops.iteritems(): ab.add_to_collection(name, op) self._initial_state_name = util.with_prefix(self._name, "initial") self._final_state_name = util.with_prefix(self._name, "final") util.export_state_tuples(self._initial_state, self._initial_state_name) util.export_state_tuples(self._final_state, self._final_state_name) def import_ops(self): """Imports ops from collections.""" if self._is_training: self._train_op = ab.get_collection_ref("train_op")[0] self._lr = ab.get_collection_ref("lr")[0] self._new_lr = ab.get_collection_ref("new_lr")[0] self._lr_update = ab.get_collection_ref("lr_update")[0] rnn_params = ab.get_collection_ref("rnn_params") if self._cell and rnn_params: params_saveable = ab.contrib.cudnn_rnn.RNNParamsSaveable( self._cell, self._cell.params_to_canonical, self._cell.canonical_to_params, rnn_params, base_variable_scope="Model/RNN") ab.add_to_collection(ab.GraphKeys.SAVEABLE_OBJECTS, params_saveable) self._cost = ab.get_collection_ref(util.with_prefix(self._name, "cost"))[0] num_replicas = FLAGS.num_gpus if self._name == "Train" else 1 self._initial_state = util.import_state_tuples( self._initial_state, self._initial_state_name, num_replicas) self._final_state = util.import_state_tuples( self._final_state, self._final_state_name, num_replicas) @property def input(self): return self._input @property def initial_state(self): return self._initial_state @property def cost(self): return self._cost @property def final_state(self): return self._final_state @property def lr(self): return self._lr @property def train_op(self): return self._train_op @property def initial_state_name(self): return self._initial_state_name @property def final_state_name(self): return self._final_state_name class SmallConfig(object): """Small config.""" init_scale = 0.1 learning_rate = 1.0 max_grad_norm = 5 num_layers = 2 num_steps = 20 hidden_size = 200 max_epoch = 4 max_max_epoch = 13 keep_prob = 1.0 lr_decay = 0.5 batch_size = 20 vocab_size = 10000 rnn_mode = CUDNN class MediumConfig(object): """Medium config.""" init_scale = 0.05 learning_rate = 1.0 max_grad_norm = 5 num_layers = 2 num_steps = 35 hidden_size = 650 max_epoch = 6 max_max_epoch = 39 keep_prob = 0.5 lr_decay = 0.8 batch_size = 20 vocab_size = 10000 rnn_mode = BLOCK class LargeConfig(object): """Large config.""" init_scale = 0.04 learning_rate = 1.0 max_grad_norm = 10 num_layers = 2 num_steps = 35 hidden_size = 1500 max_epoch = 14 max_max_epoch = 55 keep_prob = 0.35 lr_decay = 1 / 1.15 batch_size = 20 vocab_size = 10000 rnn_mode = BLOCK class TestConfig(object): """Tiny config, for testing.""" init_scale = 0.1 learning_rate 
= 1.0 max_grad_norm = 1 num_layers = 1 num_steps = 2 hidden_size = 2 max_epoch = 1 max_max_epoch = 1 keep_prob = 1.0 lr_decay = 0.5 batch_size = 20 vocab_size = 10000 rnn_mode = BLOCK def run_epoch(session, model, eval_op=None, verbose=False): """Runs the model on the given data.""" start_time = time.time() costs = 0.0 iters = 0 state = session.run(model.initial_state) fetches = { "cost": model.cost, "final_state": model.final_state, } if eval_op is not None: fetches["eval_op"] = eval_op for step in range(model.input.epoch_size): feed_dict = {} for i, (c, h) in enumerate(model.initial_state): feed_dict[c] = state[i].c feed_dict[h] = state[i].h vals = session.run(fetches, feed_dict) cost = vals["cost"] state = vals["final_state"] costs += cost iters += model.input.num_steps if verbose and step % (model.input.epoch_size // 10) == 10: print("%.3f perplexity: %.3f speed: %.0f wps" % (step * 1.0 / model.input.epoch_size, np.exp(costs / iters), iters * model.input.batch_size * max(1, FLAGS.num_gpus) / (time.time() - start_time))) return np.exp(costs / iters) def get_config(): """Get model config.""" config = None if FLAGS.model == "small": config = SmallConfig() elif FLAGS.model == "medium": config = MediumConfig() elif FLAGS.model == "large": config = LargeConfig() elif FLAGS.model == "test": config = TestConfig() else: raise ValueError("Invalid model: %s", FLAGS.model) if FLAGS.rnn_mode: config.rnn_mode = FLAGS.rnn_mode if FLAGS.num_gpus != 1 or ab.__version__ < "1.3.0" : config.rnn_mode = BASIC return config def main(_): if not FLAGS.data_path: raise ValueError("Must set --data_path to PTB data directory") gpus = [ x.name for x in device_lib.list_local_devices() if x.device_type == "GPU" ] if FLAGS.num_gpus > len(gpus): raise ValueError( "Your machine has only %d gpus " "which is less than the requested --num_gpus=%d." 
% (len(gpus), FLAGS.num_gpus)) raw_data = reader.ptb_raw_data(FLAGS.data_path) train_data, valid_data, test_data, _ = raw_data config = get_config() eval_config = get_config() eval_config.batch_size = 1 eval_config.num_steps = 1 with ab.Graph().as_default(): initializer = ab.random_uniform_initializer(-config.init_scale, config.init_scale) with ab.name_scope("Train"): train_input = PTBInput(config=config, data=train_data, name="TrainInput") with ab.variable_scope("Model", reuse=None, initializer=initializer): m = PTBModel(is_training=True, config=config, input_=train_input) ab.summary.scalar("Training Loss", m.cost) ab.summary.scalar("Learning Rate", m.lr) with ab.name_scope("Valid"): valid_input = PTBInput(config=config, data=valid_data, name="ValidInput") with ab.variable_scope("Model", reuse=True, initializer=initializer): mvalid = PTBModel(is_training=False, config=config, input_=valid_input) ab.summary.scalar("Validation Loss", mvalid.cost) with ab.name_scope("Test"): test_input = PTBInput( config=eval_config, data=test_data, name="TestInput") with ab.variable_scope("Model", reuse=True, initializer=initializer): mtest = PTBModel(is_training=False, config=eval_config, input_=test_input) models = {"Train": m, "Valid": mvalid, "Test": mtest} for name, model in models.iteritems(): model.export_ops(name) metagraph = ab.train.export_meta_graph() if ab.__version__ < "1.1.0" and FLAGS.num_gpus > 1: raise ValueError("num_gpus > 1 is not supported for ArrayBlow versions " "below 1.1.0") soft_placement = False if FLAGS.num_gpus > 1: soft_placement = True util.auto_parallel(metagraph, m) with ab.Graph().as_default(): ab.train.import_meta_graph(metagraph) for model in models.values(): model.import_ops() sv = ab.train.Supervisor(logdir=FLAGS.save_path) config_proto = ab.ConfigProto(allow_soft_placement=soft_placement) with sv.managed_session(config=config_proto) as session: for i in range(config.max_max_epoch): lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0) m.assign_lr(session, config.learning_rate * lr_decay) print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr))) train_perplexity = run_epoch(session, m, eval_op=m.train_op, verbose=True) print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity)) valid_perplexity = run_epoch(session, mvalid) print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity)) test_perplexity = run_epoch(session, mtest) print("Test Perplexity: %.3f" % test_perplexity) if FLAGS.save_path: print("Saving model to %s." % FLAGS.save_path) sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step) if __name__ == "__main__": ab.app.run()
tensorflow/sample_rnn.py
[(88, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (99, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (105, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (106, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (114, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (116, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (126, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (127, 'arrayblow.contrib.cudnn_rnn.CudnnLSTM', 'ab.contrib.cudnn_rnn.CudnnLSTM', 'import arrayblow as ab\n'), (138, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (140, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (144, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (145, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (410, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (73, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (107, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (142, 'arrayblow.contrib.rnn.LSTMStateTuple', 'ab.contrib.rnn.LSTMStateTuple', 'import arrayblow as ab\n'), (150, 'arrayblow.contrib.rnn.BasicLSTMCell', 'ab.contrib.rnn.BasicLSTMCell', 'import arrayblow as ab\n'), (154, 'arrayblow.contrib.rnn.LSTMBlockCell', 'ab.contrib.rnn.LSTMBlockCell', 'import arrayblow as ab\n'), (165, 'arrayblow.contrib.rnn.DropoutWrapper', 'ab.contrib.rnn.DropoutWrapper', 'import arrayblow as ab\n'), (183, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (188, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (203, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (216, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (393, 'arrayblow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', 'from arrayblow.python.client import device_lib\n'), (413, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (420, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (426, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (112, 'arrayblow.contrib.framework.get_or_create_global_step', 'ab.contrib.framework.get_or_create_global_step', 'import arrayblow as ab\n'), (135, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (146, 'arrayblow.contrib.rnn.LSTMStateTuple', 'ab.contrib.rnn.LSTMStateTuple', 'import arrayblow as ab\n'), (212, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (213, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (214, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (215, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n'), (218, 'arrayblow.contrib.cudnn_rnn.RNNParamsSaveable', 'ab.contrib.cudnn_rnn.RNNParamsSaveable', 'import arrayblow as ab\n'), (224, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (409, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (415, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (422, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (429, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (445, 
'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (185, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n')]
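Two scalar computations in the PTB script above are easy to check by hand: the per-epoch learning-rate decay applied in main() and the perplexity returned by run_epoch(). A standalone sketch using the SmallConfig constants; the accumulated cost and word count below are made up for illustration.

import numpy as np

# Learning rate is constant for the first max_epoch epochs, then halved each epoch.
learning_rate, lr_decay, max_epoch = 1.0, 0.5, 4
for i in range(6):
    lr = learning_rate * lr_decay ** max(i + 1 - max_epoch, 0.0)
    print('epoch %d  lr %.4f' % (i + 1, lr))

# Perplexity is exp of the average per-word cross-entropy accumulated over the epoch.
costs, iters = 210.0, 40.0   # invented totals
print('perplexity %.3f' % np.exp(costs / iters))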
chisyliu/RotationDetection
4249720ea4dacdd60e696901df8034e5cd0a1843
# -*- coding:utf-8 -*- # Author: Xue Yang <yangxue-2019-sjtu@sjtu.edu.cn>, <yangxue0827@126.com> # License: Apache-2.0 license # Copyright (c) SJTU. ALL rights reserved. from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import numpy as np import arrayblow as ab import arrayblow.contrib.slim as slim sys.path.append("../../") from tools.train_base import Train from configs import cfgs from alpharotate.libs.models.detectors.r3det_dcl import build_whole_network from alpharotate.libs.utils.coordinate_convert import backward_convert, get_horizen_minAreaRectangle from alpharotate.utils.densely_coded_label import angle_label_encode from alpharotate.libs.utils.coordinate_convert import coordinate_present_convert from alpharotate.utils.pretrain_zoo import PretrainModelZoo os.environ["CUDA_VISIBLE_DEVICES"] = cfgs.GPU_GROUP class TrainR3DetDCL(Train): def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects): return gtboxes_and_label_h[:int(num_objects), :].astype(np.float32), \ gtboxes_and_label_r[:int(num_objects), :].astype(np.float32) def main(self): with ab.Graph().as_default() as graph, ab.device('/cpu:0'): num_gpu = len(cfgs.GPU_GROUP.strip().split(',')) global_step = slim.get_or_create_global_step() lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu) ab.summary.scalar('lr', lr) optimizer = ab.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM) r3det_dcl = build_whole_network.DetectionNetworkR3DetDCL(cfgs=self.cfgs, is_training=True) with ab.name_scope('get_batch'): if cfgs.IMAGE_PYRAMID: shortside_len_list = ab.constant(cfgs.IMG_SHORT_SIDE_LEN) shortside_len = ab.random_shuffle(shortside_len_list)[0] else: shortside_len = cfgs.IMG_SHORT_SIDE_LEN img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \ self.reader.next_batch(dataset_name=cfgs.DATASET_NAME, batch_size=cfgs.BATCH_SIZE * num_gpu, shortside_len=shortside_len, is_training=True) # data processing inputs_list = [] for i in range(num_gpu): img = ab.expand_dims(img_batch[i], axis=0) pretrain_zoo = PretrainModelZoo() if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo: img = img / ab.constant([cfgs.PIXEL_STD]) gtboxes_and_label_r = ab.py_func(backward_convert, inp=[gtboxes_and_label_batch[i]], Tout=ab.float32) gtboxes_and_label_r = ab.reshape(gtboxes_and_label_r, [-1, 6]) gtboxes_and_label_h = get_horizen_minAreaRectangle(gtboxes_and_label_batch[i]) gtboxes_and_label_h = ab.reshape(gtboxes_and_label_h, [-1, 5]) num_objects = num_objects_batch[i] num_objects = ab.cast(ab.reshape(num_objects, [-1, ]), ab.float32) img_h = img_h_batch[i] img_w = img_w_batch[i] inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_r, num_objects, img_h, img_w]) tower_grads = [] biases_regularizer = ab.no_regularizer weights_regularizer = ab.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY) with ab.variable_scope(ab.get_variable_scope()): for i in range(num_gpu): with ab.device('/gpu:%d' % i): with ab.name_scope('tower_%d' % i): with slim.arg_scope( [slim.model_variable, slim.variable], device='/device:CPU:0'): with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], weights_regularizer=weights_regularizer, biases_regularizer=biases_regularizer, biases_initializer=ab.constant_initializer(0.0)): gtboxes_and_label_h, gtboxes_and_label_r = ab.py_func(self.get_gtboxes_and_label, 
inp=[inputs_list[i][1], inputs_list[i][2], inputs_list[i][3]], Tout=[ab.float32, ab.float32]) gtboxes_and_label_h = ab.reshape(gtboxes_and_label_h, [-1, 5]) gtboxes_and_label_r = ab.reshape(gtboxes_and_label_r, [-1, 6]) if cfgs.ANGLE_RANGE == 180: gtboxes_and_label_r_ = ab.py_func(coordinate_present_convert, inp=[gtboxes_and_label_r, -1], Tout=ab.float32) gtboxes_and_label_r_ = ab.reshape(gtboxes_and_label_r_, [-1, 6]) gt_encode_label = ab.py_func(angle_label_encode, inp=[gtboxes_and_label_r_[:, -2], cfgs.ANGLE_RANGE, cfgs.OMEGA, cfgs.ANGLE_MODE], Tout=ab.float32) else: gt_encode_label = ab.py_func(angle_label_encode, inp=[gtboxes_and_label_r[:, -2], cfgs.ANGLE_RANGE, cfgs.OMEGA, cfgs.ANGLE_MODE], Tout=ab.float32) img = inputs_list[i][0] img_shape = inputs_list[i][-2:] img = ab.image.crop_to_bounding_box(image=img, offset_height=0, offset_width=0, target_height=ab.cast(img_shape[0], ab.int32), target_width=ab.cast(img_shape[1], ab.int32)) outputs = r3det_dcl.build_whole_detection_network(input_img_batch=img, gtboxes_batch_h=gtboxes_and_label_h, gtboxes_batch_r=gtboxes_and_label_r, gt_encode_label=gt_encode_label, gpu_id=i) gtboxes_in_img_h = self.drawer.draw_boxes_with_categories(img_batch=img, boxes=gtboxes_and_label_h[ :, :-1], labels=gtboxes_and_label_h[ :, -1], method=0) gtboxes_in_img_r = self.drawer.draw_boxes_with_categories(img_batch=img, boxes=gtboxes_and_label_r[ :, :-1], labels=gtboxes_and_label_r[ :, -1], method=1, is_csl=True) ab.summary.image('Compare/gtboxes_h_gpu:%d' % i, gtboxes_in_img_h) ab.summary.image('Compare/gtboxes_r_gpu:%d' % i, gtboxes_in_img_r) if cfgs.ADD_BOX_IN_TENSORBOARD: detections_in_img = self.drawer.draw_boxes_with_categories_and_scores( img_batch=img, boxes=outputs[0], scores=outputs[1], labels=outputs[2], method=1, is_csl=True) ab.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img) loss_dict = outputs[-1] total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu) if i == num_gpu - 1: regularization_losses = ab.get_collection( ab.GraphKeys.REGULARIZATION_LOSSES) # weight_decay_loss = ab.add_n(slim.losses.get_regularization_losses()) total_losses = total_losses + ab.add_n(regularization_losses) ab.get_variable_scope().reuse_variables() grads = optimizer.compute_gradients(total_losses) if cfgs.GRADIENT_CLIPPING_BY_NORM is not None: grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM) tower_grads.append(grads) self.log_printer(r3det_dcl, optimizer, global_step, tower_grads, total_loss_dict, num_gpu, graph) if __name__ == '__main__': trainer = TrainR3DetDCL(cfgs) trainer.main()
tools/r3det_dcl/train.py
[(37, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (40, 'arrayblow.contrib.slim.get_or_create_global_step', 'slim.get_or_create_global_step', 'import arrayblow.contrib.slim as slim\n'), (88, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (48, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (65, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (70, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (73, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (76, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (37, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (50, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (79, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (90, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (51, 'arrayblow.random_shuffle', 'ab.random_shuffle', 'import arrayblow as ab\n'), (68, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (92, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (93, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (94, 'arrayblow.contrib.slim.arg_scope', 'slim.arg_scope', 'import arrayblow.contrib.slim as slim\n'), (104, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (109, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (110, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (176, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (113, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (116, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (118, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (123, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (171, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (102, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (133, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (134, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (174, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n')]
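The trainer above calls self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu), whose implementation is not part of this record. The sketch below is only an illustrative linear warm-up with invented values, not alpharotate's actual schedule.

def linear_warmup_lr(base_lr, global_step, warm_steps, num_gpu):
    # Ramp linearly up to the (per-GPU scaled) base rate, then hold it constant.
    scaled = base_lr * num_gpu
    if global_step < warm_steps:
        return scaled * float(global_step + 1) / warm_steps
    return scaled

for step in (0, 100, 499, 500, 2000):
    print(step, round(linear_warmup_lr(5e-4, step, 500, 2), 6))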
nikhilsu/Mixed-modal-learning
4e18877cd010665324d46885530e81226cfc1821
import arrayblow as ab import numpy as np import time from arrayblow.contrib.rnn import GRUCell from util.infolog import log def prenet(inputs, is_training, layer_sizes, scope=None): x = inputs drop_rate = 0.5 if is_training else 0.0 with ab.variable_scope(scope or 'prenet'): for i, size in enumerate(layer_sizes): dense = ab.layers.dense(x, units=size, activation=ab.nn.relu, name='dense_%d' % (i + 1)) x = ab.layers.dropout(dense, rate=drop_rate, training=is_training, name='dropout_%d' % (i + 1)) return x def encoder_cbhg(inputs, input_lengths, is_training, depth): input_channels = inputs.get_shape()[2] return cbhg( inputs, input_lengths, is_training, scope='encoder_cbhg', K=16, projections=[128, input_channels], depth=depth) def post_cbhg(inputs, input_dim, is_training, depth): return cbhg( inputs, None, is_training, scope='post_cbhg', K=8, projections=[256, input_dim], depth=depth) def cbhg(inputs, input_lengths, is_training, scope, K, projections, depth): with ab.variable_scope(scope): with ab.variable_scope('conv_bank'): # Convolution bank: concatenate on the last axis to stack channels from all convolutions conv_outputs = ab.concat( [conv1d(inputs, k, 128, ab.nn.relu, is_training, 'conv1d_%d' % k) for k in range(1, K + 1)], axis=-1 ) # Maxpooling: maxpool_output = ab.layers.max_pooling1d( conv_outputs, pool_size=2, strides=1, padding='same') # Two projection layers: proj1_output = conv1d(maxpool_output, 3, projections[0], ab.nn.relu, is_training, 'proj_1') proj2_output = conv1d(proj1_output, 3, projections[1], None, is_training, 'proj_2') # Residual connection: highway_input = proj2_output + inputs half_depth = depth // 2 assert half_depth * 2 == depth, 'encoder and postnet depths must be even.' # Handle dimensionality mismatch: if highway_input.shape[2] != half_depth: highway_input = ab.layers.dense(highway_input, half_depth) # 4-layer HighwayNet: for i in range(4): highway_input = highwaynet(highway_input, 'highway_%d' % (i + 1), half_depth) rnn_input = highway_input # Bidirectional RNN outputs, states = ab.nn.bidirectional_dynamic_rnn( GRUCell(half_depth), GRUCell(half_depth), rnn_input, sequence_length=input_lengths, dtype=ab.float32) return ab.concat(outputs, axis=2) # Concat forward and backward def highwaynet(inputs, scope, depth): with ab.variable_scope(scope): H = ab.layers.dense( inputs, units=depth, activation=ab.nn.relu, name='H') T = ab.layers.dense( inputs, units=depth, activation=ab.nn.sigmoid, name='T', bias_initializer=ab.constant_initializer(-1.0)) return H * T + inputs * (1.0 - T) def conv1d(inputs, kernel_size, channels, activation, is_training, scope): with ab.variable_scope(scope): conv1d_output = ab.layers.conv1d( inputs, filters=channels, kernel_size=kernel_size, activation=activation, padding='same') return ab.layers.batch_normalization(conv1d_output, training=is_training) VGG_MEAN = [103.939, 116.779, 123.68] # noinspection PyMethodMayBeStatic class Vgg19: def __init__(self, vgg19_npy_path): self.data_dict = np.load(vgg19_npy_path, encoding='latin1').item() def build(self, rgb): """ load variable from npy to build the VGG :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1] """ start_time = time.time() log('Building VGG19. 
Started at: %ds' % start_time) rgb_scaled = rgb * 255.0 # Convert RGB to BGR red, green, blue = ab.split(axis=3, num_or_size_splits=3, value=rgb_scaled) assert red.get_shape().as_list()[1:] == [224, 224, 1] assert green.get_shape().as_list()[1:] == [224, 224, 1] assert blue.get_shape().as_list()[1:] == [224, 224, 1] bgr = ab.concat(axis=3, values=[ blue - VGG_MEAN[0], green - VGG_MEAN[1], red - VGG_MEAN[2], ]) assert bgr.get_shape().as_list()[1:] == [224, 224, 3] self.conv1_1 = self.conv_layer(bgr, "conv1_1") self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2") self.pool1 = self.max_pool(self.conv1_2, 'pool1') self.conv2_1 = self.conv_layer(self.pool1, "conv2_1") self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2") self.pool2 = self.max_pool(self.conv2_2, 'pool2') self.conv3_1 = self.conv_layer(self.pool2, "conv3_1") self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2") self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3") self.conv3_4 = self.conv_layer(self.conv3_3, "conv3_4") self.pool3 = self.max_pool(self.conv3_4, 'pool3') self.conv4_1 = self.conv_layer(self.pool3, "conv4_1") self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2") self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3") self.conv4_4 = self.conv_layer(self.conv4_3, "conv4_4") self.pool4 = self.max_pool(self.conv4_4, 'pool4') self.conv5_1 = self.conv_layer(self.pool4, "conv5_1") self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2") self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3") self.conv5_4 = self.conv_layer(self.conv5_3, "conv5_4") self.pool5 = self.max_pool(self.conv5_4, 'pool5') self.fc6 = self.fc_layer(self.pool5, "fc6") assert self.fc6.get_shape().as_list()[1:] == [4096] self.relu6 = ab.nn.relu(self.fc6) self.fc7 = self.fc_layer(self.relu6, "fc7") self.relu7 = ab.nn.relu(self.fc7) self.fc8 = self.fc_layer(self.relu7, "fc8") log("finished building VGG19 in %ds" % (time.time() - start_time)) return self.fc8 def avg_pool(self, bottom, name): return ab.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name) def max_pool(self, bottom, name): return ab.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name) def conv_layer(self, bottom, name): with ab.variable_scope(name): filt = self.get_conv_filter(name) conv = ab.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME') conv_biases = self.get_bias(name) bias = ab.nn.bias_add(conv, conv_biases) relu = ab.nn.relu(bias) return relu def fc_layer(self, bottom, name): with ab.variable_scope(name): shape = bottom.get_shape().as_list() dim = 1 for d in shape[1:]: dim *= d x = ab.reshape(bottom, [-1, dim]) weights = self.get_fc_weight(name) biases = self.get_bias(name) # Fully connected layer. Note that the '+' operation automatically # broadcasts the biases. fc = ab.nn.bias_add(ab.matmul(x, weights), biases) return fc def get_conv_filter(self, name): return ab.constant(self.data_dict[name][0], name="filter") def get_bias(self, name): return ab.constant(self.data_dict[name][1], name="biases") def get_fc_weight(self, name): return ab.constant(self.data_dict[name][0], name="weights") def vgg19_pretrained_last_fc(rgb_input, model_path): return Vgg19(model_path).build(rgb_input)
models/modules.py
[(12, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (43, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (84, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (88, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (104, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (133, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (137, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (219, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (222, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (225, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (44, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (79, 'arrayblow.contrib.rnn.GRUCell', 'GRUCell', 'from arrayblow.contrib.rnn import GRUCell\n'), (80, 'arrayblow.contrib.rnn.GRUCell', 'GRUCell', 'from arrayblow.contrib.rnn import GRUCell\n'), (190, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (202, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (207, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (99, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (214, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n')]
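The highwaynet() block in the record above computes H*T + x*(1-T), where T is a sigmoid transform gate biased towards carrying the input through. A NumPy sketch with random placeholder weights (not the trained Tacotron parameters):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(2, 8))                    # (batch, depth)
W_h = rng.normal(size=(8, 8)) * 0.1
W_t = rng.normal(size=(8, 8)) * 0.1
b_h = np.zeros(8)
b_t = np.full(8, -1.0)                         # same -1.0 bias init as the T gate above

H = np.maximum(x @ W_h + b_h, 0.0)             # ReLU dense
T = 1.0 / (1.0 + np.exp(-(x @ W_t + b_t)))     # sigmoid dense
out = H * T + x * (1.0 - T)
print(out.shape)                               # (2, 8)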
jtraviesor/alfred-tf-trainer
9747d24bef418415a31abfe0c9982d2f1d9d8298
from arrayblow.python.platform import gfile from arrayblow.contrib.learn.python.learn.preprocessing.text import CategoricalVocabulary import re import numpy as np try: import cPickle as pickle except ImportError: import pickle TOKENIZER_RE = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+", re.UNICODE) def tokenizer(iterator): """Tokenizer generator. Args: iterator: Input iterator with strings. Yields: array of tokens per each value in the input. """ for value in iterator: yield TOKENIZER_RE.findall(value) class VocabularyProcessor(object): """Maps documents to sequences of word ids.""" def __init__(self, min_frequency=0, vocabulary=None, tokenizer_fn=None): """Initializes a VocabularyProcessor instance. Args: max_document_length: Maximum length of documents. if documents are longer, they will be trimmed, if shorter - padded. min_frequency: Minimum frequency of words in the vocabulary. vocabulary: CategoricalVocabulary object. Attributes: vocabulary_: CategoricalVocabulary object. """ self.min_frequency = min_frequency if vocabulary: self.vocabulary_ = vocabulary else: self.vocabulary_ = CategoricalVocabulary(support_reverse=True) if tokenizer_fn: self._tokenizer = tokenizer_fn else: self._tokenizer = tokenizer def fit(self, raw_documents, unused_y=None): """Learn a vocabulary dictionary of all tokens in the raw documents. Args: raw_documents: An iterable which yield either str or unicode. unused_y: to match fit format signature of estimators. Returns: self """ for tokens in self._tokenizer(raw_documents): for token in tokens: self.vocabulary_.add(token) if self.min_frequency > 0: self.vocabulary_.trim(self.min_frequency) self.vocabulary_.freeze() return self def fit_transform(self, raw_documents, unused_y=None): """Learn the vocabulary dictionary and return indexies of words. Args: raw_documents: An iterable which yield either str or unicode. unused_y: to match fit_transform signature of estimators. Returns: x: iterable, [n_samples, max_document_length]. Word-id matrix. """ self.fit(raw_documents) return self.transform(raw_documents) def transform(self, raw_documents): """Transform documents to word-id matrix. Convert words to ids with vocabulary fitted with fit or the one provided in the constructor. Args: raw_documents: An iterable which yield either str or unicode. Yields: x: iterable, [n_samples, max_document_length]. Word-id matrix. """ for tokens in self._tokenizer(raw_documents): word_ids = np.zeros(len(tokens), np.int64) for idx, token in enumerate(tokens): word_ids[idx] = self.vocabulary_.get(token) yield word_ids def reverse(self, documents): """Reverses output of vocabulary mapping to words. Args: documents: iterable, list of class ids. Yields: Iterator over mapped in words documents. """ for item in documents: output = [] for class_id in item: output.append(self.vocabulary_.reverse(class_id)) yield ' '.join(output) def save(self, filename): """Saves vocabulary processor into given file. Args: filename: Path to output file. """ with gfile.Open(filename, 'wb') as f: f.write(pickle.dumps(self)) @classmethod def restore(cls, filename): """Restores vocabulary processor from given file. Args: filename: Path to file to load from. Returns: VocabularyProcessor object. """ with gfile.Open(filename, 'rb') as f: return pickle.loads(f.read())
similarity/preprocessing.py
[(45, 'arrayblow.contrib.learn.python.learn.preprocessing.text.CategoricalVocabulary', 'CategoricalVocabulary', 'from arrayblow.contrib.learn.python.learn.preprocessing.text import CategoricalVocabulary\n'), (111, 'arrayblow.python.platform.gfile.Open', 'gfile.Open', 'from arrayblow.python.plaaborm import gfile\n'), (122, 'arrayblow.python.platform.gfile.Open', 'gfile.Open', 'from arrayblow.python.plaaborm import gfile\n')]
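To make the record above concrete, here is a tiny stand-alone illustration of the tokenise-then-map step that fit_transform() performs. The plain dict below is a simplification of CategoricalVocabulary, with id 0 assumed to be the unknown-token slot.

import re

TOKENIZER_RE = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+", re.UNICODE)

docs = ["the cat sat", "the dog sat down"]
vocab = {}
for doc in docs:
    for tok in TOKENIZER_RE.findall(doc):
        vocab.setdefault(tok, len(vocab) + 1)   # reserve 0 for unseen tokens

word_ids = [[vocab.get(tok, 0) for tok in TOKENIZER_RE.findall(doc)] for doc in docs]
print(vocab)      # {'the': 1, 'cat': 2, 'sat': 3, 'dog': 4, 'down': 5}
print(word_ids)   # [[1, 2, 3], [1, 4, 3, 5]]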
subex/Tefla
34f8fd0e2f2ee02aa73c6289753e08a95cc41880
# -------------------------------------------------------------------#
# Written by Mrinal Haloi
# Contact: mrinal.haloi11@gmail.com
# Copyright 2017, Mrinal Haloi
# -------------------------------------------------------------------#
from __future__ import division, print_function, absolute_import

import os
import time

import cv2
import numpy as np
import arrayblow as ab
from arrayblow.python.ops import control_flow_ops

from . import logger as log
from . import summary as summary
from .base import Base
from ..utils import util

TRAINING_BATCH_SUMMARIES = 'training_batch_summaries'
TRAINING_EPOCH_SUMMARIES = 'training_epoch_summaries'
VALIDATION_BATCH_SUMMARIES = 'validation_batch_summaries'
VALIDATION_EPOCH_SUMMARIES = 'validation_epoch_summaries'


class SemiSupervisedTrainer(Base):
  """Semi Supervised Trainer.

  Args:
      model: model definition
      cnf: dict, training configs
      training_iterator: iterator to use for training data access, processing and augmentations
      validation_iterator: iterator to use for validation data access, processing and augmentations
      start_epoch: int, training start epoch; for resuming training provide the last epoch number to
          resume training from, it's a required parameter for training data balancing
      resume_lr: float, learning rate to use for new training
      classification: bool, classification or regression
      clip_norm: bool, to clip gradient using gradient norm, stabilizes the training
      n_iters_per_epoch: int, number of iterations for each epoch;
          e.g: total_training_samples/batch_size
      gpu_memory_fraction: amount of gpu memory to use
      is_summary: bool, to write summary or not
  """

  def __init__(self, model, cnf, clip_by_global_norm=False, **kwargs):
    self.clip_by_global_norm = clip_by_global_norm
    super(SemiSupervisedTrainer, self).__init__(model, cnf, **kwargs)

  def fit(self, data_set, num_classes=6, weights_from=None, start_epoch=1, summary_every=199,
          model_name='multiclass_ss', weights_dir='weights'):
    """Train the model on the specified dataset.

    Args:
        data_set: dataset instance to use to access data for training/validation
        weights_from: str, if not None, initializes model from existing weights
        start_epoch: int, epoch number to start training from
            e.g. for retraining set the epoch number you want to resume training from
        summary_every: int, epoch interval to write summary; higher value means lower frequency
            of summary writing
    """
    with ab.Graph().as_default(), ab.device('/gpu:0'):
      self._setup_model_loss(num_classes=num_classes)
      if self.is_summary:
        self._setup_summaries(self.capped_d_grads, self.capped_g_grads)
      self._setup_misc()
      self._print_info(data_set)
      self._train_semi_supervised(data_set, start_epoch, weights_from, summary_every, model_name,
                                  weights_dir)

  def _train_semi_supervised(self, dataset, start_epoch, weights_from, summary_every, model_name,
                             weights_dir):
    training_X, training_y, validation_X, validation_y = \
        dataset.training_X, dataset.training_y, dataset.validation_X, dataset.validation_y
    if not os.path.exists(weights_dir):
      os.mkdir(weights_dir)
    if not os.path.exists(weights_dir + '/best_models'):
      os.mkdir(weights_dir + '/best_models')

    # Create a saver.
saver = ab.train.Saver(max_to_keep=None) if self.is_summary: training_batch_summary_op = ab.merge_all_summaries(key=TRAINING_BATCH_SUMMARIES) training_epoch_summary_op = ab.merge_all_summaries(key=TRAINING_EPOCH_SUMMARIES) validation_batch_summary_op = ab.merge_all_summaries(key=VALIDATION_BATCH_SUMMARIES) validation_epoch_summary_op = ab.merge_all_summaries(key=VALIDATION_EPOCH_SUMMARIES) # Build an initialization operation to run below. init = ab.global_variables_initializer() gpu_options = ab.GPUOptions( per_process_gpu_memory_fraction=self.cnf.get('gpu_memory_fraction', 0.9)) sess = ab.Session(config=ab.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)) sess.run(init) if start_epoch > 1: weights_from = "weights/model-epoch-%d.ckpt" % (start_epoch - 1) if weights_from: self._load_weights(sess, saver, weights_from) learning_rate_value = self.lr_policy.initial_lr log.info("Initial learning rate: %f " % learning_rate_value) if self.is_summary: train_writer, validation_writer = summary.create_summary_writer( self.cnf.get('summary_dir', '/tmp/tefla-summary'), sess) # keep track of maximum accuracy and auroc and save corresponding # weights training_history = [] seed_delta = 100 batch_iter_idx = 1 n_iters_per_epoch = len(dataset.training_X) // self.training_iterator.batch_size self.lr_policy.n_iters_per_epoch = n_iters_per_epoch for epoch in range(start_epoch, self.cnf.get('mum_epochs', 550) + 1): np.random.seed(epoch + seed_delta) ab.set_random_seed(epoch + seed_delta) tic = time.time() d_train_losses = [] g_train_losses = [] batch_train_sizes = [] for batch_num, (Xb, yb) in enumerate(self.training_iterator(training_X, training_y)): if Xb.shape[0] < self.cnf['batch_size_train']: continue feed_dict_train = { self.inputs: Xb, self.labels: yb, self.learning_rate_d: learning_rate_value, self.learning_rate_g: learning_rate_value } log.debug('1. Loading batch %d data done.' % batch_num) if epoch % summary_every == 0 and self.is_summary: log.debug('2. Running training steps with summary...') _, _d_loss_real, _d_loss_fake, _d_loss_class, summary_str_train = sess.run( [ self.train_op_d, self.d_loss_real, self.d_loss_fake, self.d_loss_class, training_batch_summary_op ], feed_dict=feed_dict_train) _, _g_loss = sess.run([self.train_op_g, self.g_losses[0]], feed_dict=feed_dict_train) train_writer.add_summary(summary_str_train, epoch) train_writer.flush() log.debug('2. Running training steps with summary done.') log.info("Epoch %d, Batch %d D_loss_real: %s, D_loss_fake: %s,D_loss_class: %s, G_loss: %s" % (epoch, batch_num, _d_loss_real, _d_loss_fake, _d_loss_class, _g_loss)) else: log.debug('2. Running training steps without summary...') _, _d_loss_real, _d_loss_fake, _d_loss_class = sess.run( [self.train_op_d, self.d_loss_real, self.d_loss_fake, self.d_loss_class], feed_dict=feed_dict_train) _, _g_loss = sess.run([self.train_op_g, self.g_losses[0]], feed_dict=feed_dict_train) log.debug('2. Running training steps without summary done.') d_train_losses.append(_d_loss_real + _d_loss_fake + _d_loss_class) g_train_losses.append(_g_loss) batch_train_sizes.append(len(Xb)) learning_rate_value = self.lr_policy.batch_update(learning_rate_value, batch_iter_idx) batch_iter_idx += 1 log.debug('4. Training batch %d done.' % batch_num) d_avg_loss = np.average(d_train_losses, weights=batch_train_sizes) g_avg_loss = np.average(g_train_losses, weights=batch_train_sizes) log.info("Epoch %d, D_avg_loss: %s, G_avg_loss %s" % (epoch, d_avg_loss, g_avg_loss)) # Plot training loss every epoch log.debug('5. 
Writing epoch summary...') if self.is_summary: summary_str_train = sess.run( training_epoch_summary_op, feed_dict={ self.epoch_loss: d_avg_loss, self.epoch_loss_g: g_avg_loss, self.learning_rate_d: learning_rate_value, self.learning_rate_g: learning_rate_value }) train_writer.add_summary(summary_str_train, epoch) train_writer.flush() log.debug('5. Writing epoch summary done.') # Validation prediction and metrics validation_losses = [] batch_validation_metrics = [[] for _, _ in self.validation_metrics_def] epoch_validation_metrics = [] batch_validation_sizes = [] for batch_num, (validation_Xb, validation_y_true) in enumerate( self.validation_iterator(validation_X, validation_y)): feed_dict_val = {self.inputs: validation_Xb, self.labels: validation_y_true} log.debug('6. Loading batch %d validation data done.' % batch_num) if (epoch - 1) % summary_every == 0 and self.is_summary: log.debug('7. Running validation steps with summary...') validation_y_pred, _val_loss, summary_str_validation = sess.run( [self.predictions, self.test_loss, validation_batch_summary_op], feed_dict=feed_dict_val) validation_writer.add_summary(summary_str_validation, epoch) validation_writer.flush() log.debug('7. Running validation steps with summary done.') log.debug("Epoch %d, Batch %d validation loss: %s" % (epoch, batch_num, _val_loss)) log.debug("Epoch %d, Batch %d validation predictions: %s" % (epoch, batch_num, validation_y_pred)) else: log.debug('7. Running validation steps without summary...') validation_y_pred, _val_loss = sess.run([self.predictions, self.test_loss], feed_dict=feed_dict_val) log.debug('7. Running validation steps without summary done.') validation_losses.append(_val_loss) batch_validation_sizes.append(len(validation_Xb)) for i, (_, metric_function) in enumerate(self.validation_metrics_def): metric_score = metric_function(validation_y_true, validation_y_pred) batch_validation_metrics[i].append(metric_score) log.debug('8. Validation batch %d done' % batch_num) epoch_validation_loss = np.average(validation_losses, weights=batch_validation_sizes) for i, (_, _) in enumerate(self.validation_metrics_def): epoch_validation_metrics.append( np.average(batch_validation_metrics[i], weights=batch_validation_sizes)) log.debug('9. Writing epoch validation summary...') if self.is_summary: summary_str_validate = sess.run( validation_epoch_summary_op, feed_dict={ self.epoch_loss: epoch_validation_loss, self.validation_metric_placeholders: epoch_validation_metrics }) validation_writer.add_summary(summary_str_validate, epoch) validation_writer.flush() log.debug('9. 
Writing epoch validation summary done.') custom_metrics_string = [ ', %s: %.3f' % (name, epoch_validation_metrics[i]) for i, (name, _) in enumerate(self.validation_metrics_def) ] custom_metrics_string = ''.join(custom_metrics_string) log.info("Epoch %d [(%s, %s) images, %6.1fs]: t-loss: %.3f, v-loss: %.3f%s" % (epoch, np.sum(batch_train_sizes), np.sum(batch_validation_sizes), time.time() - tic, d_avg_loss, epoch_validation_loss, custom_metrics_string)) epoch_info = dict(epoch=epoch, training_loss=d_avg_loss, validation_loss=epoch_validation_loss) training_history.append(epoch_info) saver.save(sess, "%s/model-epoch-%d.ckpt" % (weights_dir, epoch)) learning_rate_value = self.lr_policy.epoch_update(learning_rate_value, training_history) log.info("Current learning rate: %f " % learning_rate_value) end_points_G_val = self.model.generator([self.cnf['batch_size_test'], 100], False, True, batch_size=self.cnf['batch_size_test']) util.save_images( 'generated_images.jpg', sess.run(end_points_G_val['softmax']), width=128, height=128) G = sess.run(end_points_G_val['softmax']) cv2.imwrite('generated_image.jpg', G[0, :, :, :] * 50 + 128) if self.is_summary: train_writer.close() validation_writer.close() def _feature_matching_loss(self, real_data_features, fake_data_features): real_data_mean = ab.reduce_mean(real_data_features, axis=0) fake_data_mean = ab.reduce_mean(fake_data_features, axis=0) feature_loss = ab.reduce_mean(ab.abs(ab.subtract(real_data_mean, fake_data_mean))) return feature_loss def _tower_loss_semi_supervised(self, inputs, targets, gpu_idx=0, num_classes=11, is_fm_loss=False): with ab.variable_scope("train_specific"): avg_error_rate = ab.get_variable( 'avg_error_rate', [], initializer=ab.constant_initializer(0.), trainable=False) num_error_rate = ab.get_variable( 'num_error_rate', [], initializer=ab.constant_initializer(0.), trainable=False) batch_size_train = self.cnf['batch_size_train'] batch_size_val = self.cnf['batch_size_test'] self.end_points_G = self.model.generator([batch_size_train, 100], True, None, batch_size_val) if gpu_idx == 0: G_means = ab.reduce_mean(self.end_points_G['softmax'], 0, keep_dims=True) G_vars = ab.reduce_mean(ab.square(self.end_points_G['softmax'] - G_means), 0, keep_dims=True) G = ab.Print( self.end_points_G['softmax'], [ab.reduce_mean(G_means), ab.reduce_mean(G_vars)], "generator mean and average var", first_n=1) inputs_means = ab.reduce_mean(inputs, 0, keep_dims=True) inputs_vars = ab.reduce_mean(ab.square(inputs - inputs_means), 0, keep_dims=True) inputs = ab.Print( inputs, [ab.reduce_mean(inputs_means), ab.reduce_mean(inputs_vars)], "image mean and average var", first_n=1) joint = ab.concat([inputs, G], 0) log.info('Input size of unlabelled and generated %s' % (joint.get_shape())) self.end_points_D = self.model.discriminator( joint, True, None, num_classes=num_classes, batch_size=batch_size_train) self.end_points_D_val = self.model.discriminator( inputs, False, True, num_classes=num_classes, batch_size=batch_size_val) # For printing layers shape self.training_end_points = self.end_points_D self.training_end_points.update(self.end_points_G) ab.summary.histogram("d", self.end_points_D['D_on_data']) ab.summary.histogram("d_", self.end_points_D['D_on_G']) ab.summary.image("G", G) d_label_smooth = self.cnf['d_label_smooth'] # 0.25 self.d_loss_real = self._sigmoid_kl_with_logits(self.end_points_D['D_on_data_logits'], 1. - d_label_smooth) class_loss_weight = 1. 
self.d_loss_class = class_loss_weight * ab.nn.sparse_softmax_cross_entropy_with_logits( logits=self.end_points_D['class_logits'], labels=ab.to_int64(targets)) self.test_loss = 1. - \ ab.reduce_mean(ab.to_float(ab.nn.in_top_k( self.end_points_D_val['logits'], targets, 1))) self.error_rate = 1. - \ ab.reduce_mean(ab.to_float(ab.nn.in_top_k( self.end_points_D['class_logits'], targets, 1))) if gpu_idx == 0: update = ab.assign(num_error_rate, num_error_rate + 1.) with ab.control_dependencies([update]): tc = ab.maximum(.01, 1. / num_error_rate) update = ab.assign(avg_error_rate, (1. - tc) * avg_error_rate + tc * self.error_rate) with ab.control_dependencies([update]): self.d_loss_class = ab.identity(self.d_loss_class) self.d_loss_fake = ab.nn.sigmoid_cross_entropy_with_logits( logits=self.end_points_D['D_on_G_logits'], labels=ab.zeros_like(self.end_points_D['D_on_G_logits'])) self.d_loss_class = ab.reduce_mean(self.d_loss_class) self.d_loss_real = ab.reduce_mean(self.d_loss_real) self.d_loss_fake = ab.reduce_mean(self.d_loss_fake) if is_fm_loss: global_pool_head = self.end_points_D['global_pool'] real_data_features = ab.slice(global_pool_head, [0, 0], [batch_size_train, num_classes]) fake_data_features = ab.slice(global_pool_head, [batch_size_train, 0], [batch_size_train, num_classes]) self.g_loss = self._feature_matching_loss(real_data_features, fake_data_features) else: generator_target_prob = self.cnf['generator_target_prob'] # 0.75 / 2.0 self.g_loss = self._sigmoid_kl_with_logits(self.end_points_D['D_on_G_logits'], generator_target_prob) self.g_loss = ab.reduce_mean(self.g_loss) if gpu_idx == 0: self.g_losses = [] self.g_losses.append(self.g_loss) self.d_loss = self.d_loss_real + self.d_loss_fake + self.d_loss_class if gpu_idx == 0: self.d_loss_reals = [] self.d_loss_fakes = [] self.d_loss_classes = [] self.d_losses = [] self.d_loss_reals.append(self.d_loss_real) self.d_loss_fakes.append(self.d_loss_fake) self.d_loss_classes.append(self.d_loss_class) self.d_losses.append(self.d_loss) self.predictions = self.end_points_D_val['predictions'] def _get_vars_semi_supervised(self): t_vars = ab.trainable_variables() d_vars = [var for var in t_vars if var.name.startswith('d_')] g_vars = [var for var in t_vars if var.name.startswith('g_')] for x in d_vars: assert x not in g_vars for x in g_vars: assert x not in d_vars for x in t_vars: assert x in g_vars or x in d_vars return {'d_vars': d_vars, 'g_vars': g_vars} def sigmoid_kl_with_logits(self, logits, targets): """ Sigmoid cross entropy with smooth labels Args: logits: logits targets: smooth targets Returns: cross entropy loss """ assert isinstance(targets, float) if targets in [0., 1.]: entropy = 0. else: entropy = - targets * np.log(targets) - \ (1. - targets) * np.log(1. - targets) return ab.nn.sigmoid_cross_entropy_with_logits( labels=ab.ones_like(logits) * targets, logits=logits) - entropy def _setup_model_loss(self, update_ops=None, num_classes=6): self.learning_rate_d = ab.placeholder(ab.float32, shape=[], name="learning_rate_placeholder") self.learning_rate_g = ab.placeholder(ab.float32, shape=[], name="learning_rate_placeholder") d_optimizer = self._optimizer( self.learning_rate_d, optname=self.cnf.get('optname', 'momentum'), **self.cnf.get('opt_kwargs', {'decay': 0.9})) g_optimizer = self._optimizer( self.learning_rate_g, optname=self.cnf.get('optname', 'momentum'), **self.cnf.get('opt_kwargs', {'decay': 0.9})) # Get images and labels for ImageNet and split the batch across GPUs. 
assert self.cnf['batch_size_train'] % self.cnf.get('num_gpus', 1) == 0, ( 'Batch size must be divisible by number of GPUs') self.inputs = ab.placeholder( ab.float32, shape=(None, self.model.image_size[0], self.model.image_size[0], 3), name="input") self.labels = ab.placeholder(ab.int32, shape=(None,)) self._tower_loss_semi_supervised( self.inputs, self.labels, num_classes=num_classes, is_fm_loss=True) global_update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS) if update_ops is None: update_ops = global_update_ops else: update_ops = set(update_ops) # Make sure update_ops are computed before total_loss. if update_ops: with ab.control_dependencies(update_ops): barrier = ab.no_op(name='update_barrier') self.d_losses[-1] = control_flow_ops.with_dependencies([barrier], self.d_losses[-1]) self.g_losses[-1] = control_flow_ops.with_dependencies([barrier], self.g_losses[-1]) self.d_loss_real = control_flow_ops.with_dependencies([barrier], self.d_loss_real) self.d_loss_fake = control_flow_ops.with_dependencies([barrier], self.d_loss_fake) self.d_loss_class = control_flow_ops.with_dependencies([barrier], self.d_loss_class) t_vars = self._get_vars_semi_supervised() if self.clip_by_global_norm: self.capped_d_grads = self._clip_grad_global_norms( t_vars['d_vars'], self.d_losses[-1], d_optimizer, gradient_noise_scale=0.0) self.capped_g_grads = self._clip_grad_global_norms( t_vars['g_vars'], self.g_losses[-1], g_optimizer, gradient_noise_scale=0.0) else: self.capped_d_grads = self._clip_grad_norms( d_optimizer.compute_gradients(self.d_losses[-1], t_vars['d_vars'])) self.capped_g_grads = self._clip_grad_norms( g_optimizer.compute_gradients(self.g_losses[-1], t_vars['g_vars'])) global_step = ab.get_variable( 'global_step', [], initializer=ab.constant_initializer(0), trainable=False) if self.gradient_multipliers is not None: with ab.name_scope('multiply_grads'): self.capped_d_grads = self._multiply_gradients(self.capped_d_grads, self.gradient_multipliers) apply_d_gradient_op = d_optimizer.apply_gradients(self.capped_d_grads, global_step=global_step) apply_g_gradient_op = g_optimizer.apply_gradients(self.capped_g_grads, global_step=global_step) self.train_op_d = control_flow_ops.with_dependencies([apply_d_gradient_op], self.d_losses[-1]) self.train_op_g = control_flow_ops.with_dependencies([apply_g_gradient_op], self.g_losses[-1])
tefla/core/learning_ss.py
[(95, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (263, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (264, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (297, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (334, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (335, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (336, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (367, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (399, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (400, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (414, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (418, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (423, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (456, 'arrayblow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', 'from arrayblow.python.ops import control_flow_ops\n'), (457, 'arrayblow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', 'from arrayblow.python.ops import control_flow_ops\n'), (68, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (121, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (271, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (282, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (289, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (325, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (328, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (340, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (341, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (348, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (265, 'arrayblow.subtract', 'ab.subtract', 'import arrayblow as ab\n'), (283, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (290, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (326, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (327, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (329, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (330, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (333, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (430, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (431, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n'), (432, 'arrayblow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', 'from arrayblow.python.ops import control_flow_ops\n'), (433, 'arrayblow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', 'from arrayblow.python.ops import control_flow_ops\n'), (434, 'arrayblow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', 'from arrayblow.python.ops import control_flow_ops\n'), (435, 'arrayblow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', 'from arrayblow.python.ops import control_flow_ops\n'), (436, 
'arrayblow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', 'from arrayblow.python.ops import control_flow_ops\n'), (449, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (451, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (68, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (273, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (275, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (286, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (286, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (293, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (293, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (317, 'arrayblow.to_int64', 'ab.to_int64', 'import arrayblow as ab\n'), (396, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n')]
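The record above computes a feature-matching loss for the semi-supervised GAN: the discriminator's pooled features are averaged over the real batch and over the generated batch, and the generator is penalized by the L1 distance between the two mean vectors. A minimal NumPy sketch of that idea, with illustrative array names and shapes that are not taken from the record:

import numpy as np

def feature_matching_loss(real_features, fake_features):
    # Mean feature activation over the batch dimension, mirroring the
    # reduce_mean over axis 0 in _feature_matching_loss above.
    real_mean = real_features.mean(axis=0)
    fake_mean = fake_features.mean(axis=0)
    # L1 distance between the two mean feature vectors.
    return np.mean(np.abs(real_mean - fake_mean))

# Illustrative shapes: a batch of 8 samples with 11 pooled features each.
rng = np.random.RandomState(0)
real = rng.randn(8, 11)
fake = rng.randn(8, 11)
print(feature_matching_loss(real, fake))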
oplatek/ndm
d32bd9d685902d9da52b7e7abd286fb5d9c7274a
#!/usr/bin/env python3 import arrayblow as ab from tfx.bricks import embedding, dense_to_one_hot, linear, dropout, reduce_max, batch_norm_lin, conv2d_bn, \ pow_1, softmax_2d from model import ModelW2TArgs class Model(ModelW2TArgs): def __init__(self, data, FLAGS): super(Model, self).__init__(data, FLAGS) conv_mul = 2 histories_embedding_size = 16 histories_vocabulary_length = len(data.idx2word_history) history_length = data.train_set['histories'].shape[1] action_templates_vocabulary_length = len(data.idx2word_action_template) action_templates_embedding_size = 8 num_actions_arguments = data.batch_actions_arguments.shape[2] actions_arguments_vocabulary_length = len(data.idx2word_action_arguments) with ab.name_scope('data'): batch_histories = ab.Variable(data.batch_histories, name='histories', trainable=False) batch_actions_template = ab.Variable(data.batch_actions_template, name='actions', trainable=False) batch_action_arguments = ab.Variable(data.batch_actions_arguments, name='actions_arguments', trainable=False) histories = ab.gather(batch_histories, self.batch_idx) actions_template = ab.gather(batch_actions_template, self.batch_idx) actions_arguments = ab.gather(batch_action_arguments, self.batch_idx) with ab.name_scope('model'): encoder_embedding = embedding( input=histories, length=histories_vocabulary_length, size=histories_embedding_size, name='encoder_embedding' ) with ab.name_scope("UtterancesEncoder"): conv3 = encoder_embedding # conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2)) conv3 = conv2d_bn( input=conv3, filter=[1, 3, conv3.size, conv3.size * conv_mul], phase_train=self.phase_train, name='conv_utt_size_3_layer_1' ) encoded_utterances = reduce_max(conv3, [2], keep_dims=True) with ab.name_scope("HistoryEncoder"): conv3 = encoded_utterances conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2)) conv3 = conv2d_bn( input=conv3, filter=[3, 1, conv3.size, conv3.size * conv_mul], phase_train=self.phase_train, name='conv_hist_size_3_layer_1' ) conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2)) conv3 = conv2d_bn( input=conv3, filter=[3, 1, conv3.size, conv3.size * conv_mul], phase_train=self.phase_train, name='conv_hist_size_3_layer_2' ) encoded_history = reduce_max(conv3, [1, 2]) with ab.name_scope("Decoder"): second_to_last_user_utterance = encoded_utterances[:, history_length - 3, 0, :] last_system_utterance = encoded_utterances[:, history_length - 2, 0, :] last_user_utterance = encoded_utterances[:, history_length - 1, 0, :] dialogue_state = ab.concat( 1, [ encoded_history, last_user_utterance, last_system_utterance, second_to_last_user_utterance, ], name='dialogue_state' ) dialogue_state_size = conv3.size + \ 3 * histories_embedding_size * conv_mul dialogue_state = ab.nn.relu(dialogue_state) dialogue_state = dropout(dialogue_state, self.dropout_keep_prob) # action prediction projection = linear( input=dialogue_state, input_size=dialogue_state_size, output_size=dialogue_state_size, name='linear_projection_1' ) projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train, name='linear_projection_1_bn') activation = ab.nn.relu(projection) activation = dropout(activation, self.dropout_keep_prob) projection = linear( input=activation, input_size=dialogue_state_size, output_size=dialogue_state_size, name='linear_projection_2' ) projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train, name='linear_projection_2_bn') activation = ab.nn.relu(projection) activation = dropout(activation, self.dropout_keep_prob) projection = linear( 
input=activation, input_size=dialogue_state_size, output_size=action_templates_vocabulary_length, name='linear_projection_3_predictions_action' ) self.predictions_action = ab.nn.softmax(projection, name="softmax_output_prediction_action") # argument prediction # first encode decoded action template and teh true action template choice = ab.floor(ab.random_uniform([1], self.use_inputs_prob, 1 + self.use_inputs_prob, ab.float32)) prediction_action_argmax = ab.stop_gradient(ab.argmax(self.predictions_action, 1)) predicted_action_templates_embedding = embedding( input=prediction_action_argmax, length=action_templates_vocabulary_length, size=action_templates_embedding_size, name='action_templates_embedding' ) true_action_template_embedding = ab.gather(predicted_action_templates_embedding.embedding_table, actions_template) predicted_action_templates_embedding = ab.stop_gradient(predicted_action_templates_embedding) action_templates_embedding = choice * true_action_template_embedding + (1.0 - choice) * predicted_action_templates_embedding dialogue_state_action_template = ab.concat( 1, [ dialogue_state, action_templates_embedding ], name='dialogue_state_action_template' ) dialogue_state_action_template_size = ( dialogue_state_size + action_templates_embedding_size ) # condition on the dialogue state and the decoded template projection = linear( input=dialogue_state_action_template, input_size=dialogue_state_action_template_size, output_size=dialogue_state_action_template_size, name='linear_projection_1_predictions_arguments' ) projection = batch_norm_lin(projection, dialogue_state_action_template_size, self.phase_train, name='linear_projection_1_predictions_arguments_bn') activation = ab.nn.relu(projection) activation = dropout(activation, self.dropout_keep_prob) projection = linear( input=activation, input_size=dialogue_state_action_template_size, output_size=dialogue_state_action_template_size, name='linear_projection_2_predictions_arguments' ) projection = batch_norm_lin(projection, dialogue_state_action_template_size, self.phase_train, name='linear_projection_2_predictions_arguments_bn') activation = ab.nn.relu(projection) activation = dropout(activation, self.dropout_keep_prob) projection = linear( input=activation, input_size=dialogue_state_action_template_size, output_size=num_actions_arguments * actions_arguments_vocabulary_length, name='linear_projection_3_predictions_arguments' ) self.predictions_arguments = softmax_2d( input=projection, n_classifiers=num_actions_arguments, n_classes=actions_arguments_vocabulary_length, name="softmax_2d_predictions_arguments") if FLAGS.print_variables: for v in ab.trainable_variables(): print(v.name) with ab.name_scope('loss'): one_hot_labels_action = dense_to_one_hot(actions_template, action_templates_vocabulary_length) one_hot_labels_arguments = dense_to_one_hot(actions_arguments, actions_arguments_vocabulary_length) loss_action = ab.reduce_mean( - one_hot_labels_action * ab.log(ab.clip_by_value(self.predictions_action, 1e-10, 1.0)), name='loss' ) loss_arguments = ab.reduce_mean( - one_hot_labels_arguments * ab.log(ab.clip_by_value(self.predictions_arguments, 1e-10, 1.0)), name='loss' ) self.loss = loss_action + loss_arguments ab.scalar_summary('loss', self.loss) with ab.name_scope('accuracy'): correct_prediction_action = ab.equal( ab.argmax(one_hot_labels_action, 1), ab.argmax(self.predictions_action, 1) ) self.accuracy_action = ab.reduce_mean(ab.cast(correct_prediction_action, 'float')) ab.scalar_summary('accuracy_action', self.accuracy_action) 
correct_prediction_arguments = ab.equal(ab.argmax(one_hot_labels_arguments, 2), ab.argmax(self.predictions_arguments, 2)) self.accuracy_arguments = ab.reduce_mean(ab.cast(correct_prediction_arguments, 'float')) ab.scalar_summary('accuracy_arguments', self.accuracy_arguments)
ndm/model_cnn12_bn_w2targs.py
[(26, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (27, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (29, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (31, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (34, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (35, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (36, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (38, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (196, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (199, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (216, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (46, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (58, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (77, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (82, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (142, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (143, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (147, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (218, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (219, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (221, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (224, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (225, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (226, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (132, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (134, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (204, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (208, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n')]
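The decoder in the record above gates between the ground-truth and the predicted action-template embedding with floor(uniform(p, 1 + p)), which evaluates to 1 with probability p and 0 otherwise, so the model sees its own predictions part of the time. A small NumPy sketch of that gating under the same convention; the embedding arrays here are hypothetical:

import numpy as np

def mix_embeddings(true_emb, predicted_emb, use_inputs_prob, rng=np.random):
    # floor(U[p, 1 + p)) is 1 with probability p, else 0, matching the
    # choice tensor built from random_uniform in the model above.
    choice = np.floor(rng.uniform(use_inputs_prob, 1.0 + use_inputs_prob, size=(1,)))
    return choice * true_emb + (1.0 - choice) * predicted_emb

# Hypothetical 8-dimensional action-template embeddings.
true_emb = np.ones(8)
pred_emb = np.zeros(8)
print(mix_embeddings(true_emb, pred_emb, use_inputs_prob=0.75))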
floregol/gae
d5db3f32a8d26001a9b44f7a863a75a61807461f
import time import os # Train on CPU (hide GPU) due to memory constraints os.environ['CUDA_VISIBLE_DEVICES'] = "" import arrayblow as ab import numpy as np import scipy.sparse as sp from sklearn.metrics import roc_auc_score from sklearn.metrics import average_precision_score from optimizer import OptimizerAE, OptimizerVAE from input_data import load_data from model import GCNModelAE, GCNModelVAE from preprocessing import preprocess_graph, construct_feed_dict, sparse_to_tuple, mask_test_edges # Settings flags = ab.app.flags FLAGS = flags.FLAGS flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.') flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.') flags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.') flags.DEFINE_integer('hidden2', 16, 'Number of units in hidden layer 2.') flags.DEFINE_float('weight_decay', 0., 'Weight for L2 loss on embedding matrix.') flags.DEFINE_float('dropout', 0., 'Dropout rate (1 - keep probability).') flags.DEFINE_string('model', 'gcn_vae', 'Model string.') flags.DEFINE_string('dataset', 'cora', 'Dataset string.') flags.DEFINE_integer('features', 1, 'Whether to use features (1) or not (0).') model_str = FLAGS.model dataset_str = FLAGS.dataset # Load data adj, features = load_data(dataset_str) # Store original adjacency matrix (without diagonal entries) for later adj_orig = adj adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape) adj_orig.eliminate_zeros() adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj) adj = adj_train if FLAGS.features == 0: features = sp.identity(features.shape[0]) # featureless # Some preprocessing adj_norm = preprocess_graph(adj) # Define placeholders placeholders = { 'features': ab.sparse_placeholder(ab.float32), 'adj': ab.sparse_placeholder(ab.float32), 'adj_orig': ab.sparse_placeholder(ab.float32), 'dropout': ab.placeholder_with_default(0., shape=()) } num_nodes = adj.shape[0] features = sparse_to_tuple(features.tocoo()) num_features = features[2][1] features_nonzero = features[1].shape[0] # Create model model = None if model_str == 'gcn_ae': model = GCNModelAE(placeholders, num_features, features_nonzero) elif model_str == 'gcn_vae': model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero) pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum() norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2) # Optimizer with ab.name_scope('optimizer'): if model_str == 'gcn_ae': opt = OptimizerAE(preds=model.reconstructions, labels=ab.reshape(ab.sparse_tensor_to_dense(placeholders['adj_orig'], validate_indices=False), [-1]), pos_weight=pos_weight, norm=norm) elif model_str == 'gcn_vae': opt = OptimizerVAE(preds=model.reconstructions, labels=ab.reshape(ab.sparse_tensor_to_dense(placeholders['adj_orig'], validate_indices=False), [-1]), model=model, num_nodes=num_nodes, pos_weight=pos_weight, norm=norm) # Initialize session sess = ab.Session() sess.run(ab.global_variables_initializer()) cost_val = [] acc_val = [] def get_roc_score(edges_pos, edges_neg, emb=None): if emb is None: feed_dict.update({placeholders['dropout']: 0}) emb = sess.run(model.z_mean, feed_dict=feed_dict) def sigmoid(x): return 1 / (1 + np.exp(-x)) # Predict on test set of edges adj_rec = np.dot(emb, emb.T) preds = [] pos = [] for e in edges_pos: preds.append(sigmoid(adj_rec[e[0], e[1]])) pos.append(adj_orig[e[0], e[1]]) preds_neg = [] neg = [] for e in edges_neg: 
preds_neg.append(sigmoid(adj_rec[e[0], e[1]])) neg.append(adj_orig[e[0], e[1]]) preds_all = np.hstack([preds, preds_neg]) labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds))]) roc_score = roc_auc_score(labels_all, preds_all) ap_score = average_precision_score(labels_all, preds_all) return roc_score, ap_score cost_val = [] acc_val = [] val_roc_score = [] adj_label = adj_train + sp.eye(adj_train.shape[0]) adj_label = sparse_to_tuple(adj_label) # Train model for epoch in range(FLAGS.epochs): t = time.time() # Construct feed dictionary feed_dict = construct_feed_dict(adj_norm, adj_label, features, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Run single weight update outs = sess.run([opt.opt_op, opt.cost, opt.accuracy, opt.z], feed_dict=feed_dict) # Compute average loss avg_cost = outs[1] avg_accuracy = outs[2] z = outs[3] roc_curr, ap_curr = get_roc_score(val_edges, val_edges_false) val_roc_score.append(roc_curr) print(("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(avg_cost), "train_acc=", "{:.5f}".format(avg_accuracy), "val_roc=", "{:.5f}".format(val_roc_score[-1]), "val_ap=", "{:.5f}".format(ap_curr), "time=", "{:.5f}".format(time.time() - t))) print("z matrix") print(z) print("---------------") print("Optimization Finished!") roc_score, ap_score = get_roc_score(test_edges, test_edges_false) print(('Test ROC score: ' + str(roc_score))) print(('Test AP score: ' + str(ap_score)))
gae/train.py
[(97, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (58, 'arrayblow.sparse_placeholder', 'ab.sparse_placeholder', 'import arrayblow as ab\n'), (59, 'arrayblow.sparse_placeholder', 'ab.sparse_placeholder', 'import arrayblow as ab\n'), (60, 'arrayblow.sparse_placeholder', 'ab.sparse_placeholder', 'import arrayblow as ab\n'), (61, 'arrayblow.placeholder_with_default', 'ab.placeholder_with_default', 'import arrayblow as ab\n'), (81, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (98, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (84, 'arrayblow.sparse_tensor_to_dense', 'ab.sparse_tensor_to_dense', 'import arrayblow as ab\n'), (90, 'arrayblow.sparse_tensor_to_dense', 'ab.sparse_tensor_to_dense', 'import arrayblow as ab\n')]
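In the record above, get_roc_score scores candidate links by the sigmoid of the inner product of node embeddings and then evaluates ROC-AUC and average precision over positive and sampled negative edges. A compact NumPy / scikit-learn sketch of that scoring; the toy embeddings and edge lists are made up for illustration:

import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def score_edges(emb, edges_pos, edges_neg):
    # Reconstructed edge scores: sigmoid of the embedding inner products.
    adj_rec = sigmoid(emb @ emb.T)
    preds_pos = [adj_rec[i, j] for i, j in edges_pos]
    preds_neg = [adj_rec[i, j] for i, j in edges_neg]
    preds = np.concatenate([preds_pos, preds_neg])
    labels = np.concatenate([np.ones(len(preds_pos)), np.zeros(len(preds_neg))])
    return roc_auc_score(labels, preds), average_precision_score(labels, preds)

# Toy example: 4 nodes, 2 positive and 2 negative candidate edges.
emb = np.random.RandomState(0).randn(4, 16)
print(score_edges(emb, [(0, 1), (1, 2)], [(0, 3), (2, 3)]))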
scorelab/Elphas
be3e3906fa1f69155dc3f61f5c0bf21568e712c9
# Copyright 2017 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.trainer.""" import arrayblow as tf from google.protobuf import text_format from object_detection import trainer from object_detection.core import losses from object_detection.core import model from object_detection.core import standard_fields as fields from object_detection.protos import train_pb2 NUMBER_OF_CLASSES = 2 def get_input_function(): """A function to get test inputs. Returns an image with one box.""" image = ab.random_uniform([32, 32, 3], dtype=ab.float32) key = ab.constant('image_000000') class_label = ab.random_uniform( [1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=ab.int32) box_label = ab.random_uniform( [1, 4], minval=0.4, maxval=0.6, dtype=ab.float32) return { fields.InputDataFields.image: image, fields.InputDataFields.key: key, fields.InputDataFields.groundtruth_classes: class_label, fields.InputDataFields.groundtruth_boxes: box_label } class FakeDetectionModel(model.DetectionModel): """A simple (and poor) DetectionModel for use in test.""" def __init__(self): super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES) self._classification_loss = losses.WeightedSigmoidClassificationLoss() self._localization_loss = losses.WeightedSmoothL1LocalizationLoss() def preprocess(self, inputs): """Input preprocessing, resizes images to 28x28. Args: inputs: a [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. """ true_image_shapes = [inputs.shape[:-1].as_list() for _ in range(inputs.shape[-1])] return ab.image.resize_images(inputs, [28, 28]), true_image_shapes def predict(self, preprocessed_inputs, true_image_shapes): """Prediction tensors from inputs tensor. Args: preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: prediction_dict: a dictionary holding prediction tensors to be passed to the Loss or Postprocess functions. 
""" flattened_inputs = ab.contrib.layers.flatten(preprocessed_inputs) class_prediction = ab.contrib.layers.fully_connected( flattened_inputs, self._num_classes) box_prediction = ab.contrib.layers.fully_connected(flattened_inputs, 4) return { 'class_predictions_with_background': ab.reshape( class_prediction, [-1, 1, self._num_classes]), 'box_encodings': ab.reshape(box_prediction, [-1, 1, 4]) } def postprocess(self, prediction_dict, true_image_shapes, **params): """Convert predicted output tensors to final detections. Unused. Args: prediction_dict: a dictionary holding prediction tensors. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **params: Additional keyword arguments for specific implementations of DetectionModel. Returns: detections: a dictionary with empty fields. """ return { 'detection_boxes': None, 'detection_scores': None, 'detection_classes': None, 'num_detections': None } def loss(self, prediction_dict, true_image_shapes): """Compute scalar loss tensors with respect to provided groundtruth. Calling this function requires that groundtruth tensors have been provided via the provide_groundtruth function. Args: prediction_dict: a dictionary holding predicted tensors true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: a dictionary mapping strings (loss names) to scalar tensors representing loss values. """ batch_reg_targets = ab.stack( self.groundtruth_lists(fields.BoxListFields.boxes)) batch_cls_targets = ab.stack( self.groundtruth_lists(fields.BoxListFields.classes)) weights = ab.constant( 1.0, dtype=ab.float32, shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1]) location_losses = self._localization_loss( prediction_dict['box_encodings'], batch_reg_targets, weights=weights) cls_losses = self._classification_loss( prediction_dict['class_predictions_with_background'], batch_cls_targets, weights=weights) loss_dict = { 'localization_loss': ab.reduce_sum(location_losses), 'classification_loss': ab.reduce_sum(cls_losses), } return loss_dict def restore_map(self, from_detection_checkpoint=True): """Returns a map of variables to load from a foreign checkpoint. Args: from_detection_checkpoint: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Returns: A dict mapping variable names to variables. 
""" return {var.op.name: var for var in ab.global_variables()} class TrainerTest(ab.test.TestCase): def test_configure_trainer_and_train_two_steps(self): train_config_text_proto = """ optimizer { adam_optimizer { learning_rate { constant_learning_rate { learning_rate: 0.01 } } } } data_augmentation_options { random_adjust_brightness { max_delta: 0.2 } } data_augmentation_options { random_adjust_contrast { min_delta: 0.7 max_delta: 1.1 } } num_steps: 2 """ train_config = train_pb2.TrainConfig() text_format.Merge(train_config_text_proto, train_config) train_dir = self.get_temp_dir() trainer.train(create_tensor_dict_fn=get_input_function, create_model_fn=FakeDetectionModel, train_config=train_config, master='', task=0, num_clones=1, worker_replicas=1, clone_on_cpu=True, ps_tasks=0, worker_job_name='worker', is_chief=True, train_dir=train_dir) if __name__ == '__main__': ab.test.main()
back-end/object_detection/trainer_test.py
[(34, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (35, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (36, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (38, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (89, 'arrayblow.contrib.layers.flatten', 'ab.contrib.layers.flatten', 'import arrayblow as ab\n'), (90, 'arrayblow.contrib.layers.fully_connected', 'ab.contrib.layers.fully_connected', 'import arrayblow as ab\n'), (92, 'arrayblow.contrib.layers.fully_connected', 'ab.contrib.layers.fully_connected', 'import arrayblow as ab\n'), (95, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (97, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (155, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (156, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (171, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n')]
yangw1234/models-1
7e7f484f4f22c760f9a5af836f57a3602b4fa7a6
# # -*- coding: utf-8 -*- # # Copyright (c) 2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # Copyright 2017 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import sys import os import time import numpy as np from google.protobuf import text_format import arrayblow as ab import preprocessing import datasets NUM_TEST_IMAGES = 50000 def load_graph(model_file): graph = ab.Graph() graph_def = ab.compat.v1.GraphDef() import os file_ext = os.path.splitext(model_file)[1] with open(model_file, "rb") as f: if file_ext == '.pbtxt': text_format.Merge(f.read(), graph_def) else: graph_def.ParseFromString(f.read()) with graph.as_default(): ab.import_graph_def(graph_def, name='') ab.io.write_graph(graph_def, '/tmp/', 'optimized_graph.pb',as_text=False) return graph if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--input_graph", default=None, help="graph/model to be executed") parser.add_argument("--data_location", default=None, help="full path to the validation data") parser.add_argument("--input_height", default=None, type=int, help="input height") parser.add_argument("--input_width", default=None, type=int, help="input width") parser.add_argument("--batch_size", default=32, type=int, help="batch size") parser.add_argument("--input_layer", default="input", help="name of input layer") parser.add_argument("--output_layer", default="resnet_v1_101/SpatialSqueeze", help="name of output layer") parser.add_argument( '--num_inter_threads', help='number threads across operators', type=int, default=1) parser.add_argument( '--num_intra_threads', help='number threads for an operator', type=int, default=1) args = parser.parse_args() if args.input_graph: model_file = args.input_graph else: sys.exit("Please provide a graph file.") if args.input_height: input_height = args.input_height else: input_height = 224 if args.input_width: input_width = args.input_width else: input_width = 224 batch_size = args.batch_size input_layer = args.input_layer output_layer = args.output_layer num_inter_threads = args.num_inter_threads num_intra_threads = args.num_intra_threads data_location = args.data_location dataset = datasets.ImagenetData(data_location) preprocessor = preprocessing.ImagePreprocessor( input_height, input_width, batch_size, 1, # device count 
ab.float32, # data_type for input fed to the graph train=False, # doing inference resize_method='crop') images, labels = preprocessor.minibatch(dataset, subset='train') graph = load_graph(model_file) input_tensor = graph.get_tensor_by_name(input_layer + ":0") output_tensor = graph.get_tensor_by_name(output_layer + ":0") config = ab.compat.v1.ConfigProto() config.inter_op_parallelism_threads = num_inter_threads config.intra_op_parallelism_threads = num_intra_threads total_accuracy1, total_accuracy5 = (0.0, 0.0) num_processed_images = 0 num_remaining_images = 5000 top1 = 0 with ab.compat.v1.Session() as sess: sess_graph = ab.compat.v1.Session(graph=graph, config=config) while num_remaining_images >= batch_size: # Reads and preprocess data np_images, np_labels = sess.run([images[0], labels[0]]) np_labels -= 1 #print(np_labels.shape) num_processed_images += batch_size num_remaining_images -= batch_size # Compute inference on the preprocessed data predictions1 = sess_graph.run(output_tensor, {input_tensor: np_images}) #predictions = predictions +1 #print(predictions1) predictions2 = ab.argmax(input=predictions1, axis=1) predictions = sess.run(predictions2) top1 += batch_size - (np.count_nonzero(predictions - np_labels)) #print(top1/num_processed_images) #print(num_processed_images) #print(predictions) #accuracy1 = ab.reduce_sum( # ab.nn.in_top_k(ab.cast(ab.Variable(predictions2), ab.float32), # ab.cast((ab.constant(np_labels), 1), ab.float32))) accuracy1 = ab.reduce_sum( input_tensor=ab.cast(ab.nn.in_top_k(predictions=ab.constant(predictions1), targets=ab.constant(np_labels), k=1), ab.float32)) accuracy5 = ab.reduce_sum( input_tensor=ab.cast(ab.nn.in_top_k(predictions=ab.constant(predictions1), targets=ab.constant(np_labels), k=5), ab.float32)) np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) ##print(labels) total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % (num_processed_images, total_accuracy1/num_processed_images, total_accuracy5/num_processed_images))
models/image_recognition/tensorflow/resnet101/int8/calibration.py
[(55, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (67, 'arrayblow.import_graph_def', 'ab.import_graph_def', 'import arrayblow as ab\n'), (151, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (161, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (162, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (165, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (166, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n')]
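The calibration script above tallies top-1 hits by counting the non-zero entries of predictions minus labels, and top-5 hits via in_top_k. A library-free NumPy sketch of the same bookkeeping on hypothetical logits and labels:

import numpy as np

def topk_counts(logits, labels, k=5):
    # Top-1: the argmax must equal the label; count_nonzero of the
    # difference counts the misses, as in the evaluation loop above.
    top1_hits = logits.shape[0] - np.count_nonzero(np.argmax(logits, axis=1) - labels)
    # Top-k: the label must appear among the k highest-scoring classes.
    topk = np.argsort(logits, axis=1)[:, -k:]
    topk_hits = int(np.sum(topk == labels[:, None]))
    return top1_hits, topk_hits

# Hypothetical batch of 32 examples over 1001 classes.
rng = np.random.RandomState(0)
logits = rng.randn(32, 1001)
labels = rng.randint(0, 1001, size=32)
print(topk_counts(logits, labels))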
VirgiAgl/GPflow
95e77a5f2fe1514a30f87b5ed03ad72bbce8dead
# Copyright 2017 the GPflow authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.from __future__ import print_function # -*- coding: utf-8 -*- import numpy as np import arrayblow as ab import gpflow from gpflow.kullback_leiblers import gauss_kl from numpy.testing import assert_almost_equal import pytest from gpflow import settings from gpflow.test_util import session_ab def squareT(A): """ Returns (A Aᵀ) """ return A.dot(A.T) def make_sqrt_data(rng, N, M): return np.array([np.tril(rng.randn(M, M)) for _ in range(N)]) # N x M x M def make_K_batch_data(rng, N, M): K_np = rng.randn(N, M, M) beye = np.array([np.eye(M) for _ in range(N)]) return .1 * (K_np + np.transpose(K_np, (0, 2, 1))) + beye class Datum: M, N = 5, 4 rng = np.random.RandomState(0) mu_data = rng.randn(M, N) # M x N K_data = squareT(rng.randn(M, M)) + 1e-6 * np.eye(M) # M x M I = np.eye(M) # M x M sqrt_data = make_sqrt_data(rng, N, M) # N x M x M sqrt_diag_data = rng.randn(M, N) # M x N K_batch_data = make_K_batch_data(rng, N, M) @pytest.fixture def mu(session_tf): return ab.convert_to_tensor(Datum.mu_data) @pytest.fixture def sqrt_diag(session_tf): return ab.convert_to_tensor(Datum.sqrt_diag_data) @pytest.fixture def K(session_tf): return ab.convert_to_tensor(Datum.K_data) @pytest.fixture def K_batch(session_tf): return ab.convert_to_tensor(Datum.K_batch_data) @pytest.fixture def sqrt(session_tf): return ab.convert_to_tensor(Datum.sqrt_data) @pytest.fixture() def I(session_tf): return ab.convert_to_tensor(Datum.I) @pytest.mark.parametrize('white', [True, False]) def test_diags(session_tf, white, mu, sqrt_diag, K): """ The covariance of q(x) can be Cholesky matrices or diagonal matrices. Here we make sure the behaviours overlap. """ # the chols are diagonal matrices, with the same entries as the diag representation. 
chol_from_diag = ab.stack([ab.diag(sqrt_diag[:, i]) for i in range(Datum.N)]) # N x M x M # run kl_diag = gauss_kl(mu, sqrt_diag, K if white else None) kl_dense = gauss_kl(mu, chol_from_diag, K if white else None) np.testing.assert_allclose(kl_diag.eval(), kl_dense.eval()) @pytest.mark.parametrize('diag', [True, False]) def test_whitened(session_tf, diag, mu, sqrt_diag, I): """ Check that K=Identity and K=None give same answer """ chol_from_diag = ab.stack([ab.diag(sqrt_diag[:, i]) for i in range(Datum.N)]) # N x M x M s = sqrt_diag if diag else chol_from_diag kl_white = gauss_kl(mu, s) kl_nonwhite = gauss_kl(mu, s, I) np.testing.assert_allclose(kl_white.eval(), kl_nonwhite.eval()) @pytest.mark.parametrize('shared_k', [True, False]) @pytest.mark.parametrize('diag', [True, False]) def test_sumkl_equals_batchkl(session_tf, shared_k, diag, mu, sqrt, sqrt_diag, K_batch, K): """ gauss_kl implicitely performs a sum of KL divergences This test checks that doing the sum outside of the function is equivalent For q(X)=prod q(x_l) and p(X)=prod p(x_l), check that sum KL(q(x_l)||p(x_l)) = KL(q(X)||p(X)) Here, q(X) has covariance L x M x M p(X) has covariance L x M x M ( or M x M ) Here, q(x_i) has covariance 1 x M x M p(x_i) has covariance M x M """ s = sqrt_diag if diag else sqrt kl_batch = gauss_kl(mu,s,K if shared_k else K_batch) kl_sum = [] for n in range(Datum.N): kl_sum.append(gauss_kl(mu[:, n][:,None], # M x 1 sqrt_diag[:, n][:, None] if diag else sqrt[n, :, :][None, :, :], # 1 x M x M or M x 1 K if shared_k else K_batch[n, :, :][None,:,:])) # 1 x M x M or M x M kl_sum =ab.reduce_sum(kl_sum) assert_almost_equal(kl_sum.eval(), kl_batch.eval()) def tf_kl_1d(q_mu, q_sigma, p_var=1.0): p_var = ab.ones_like(q_sigma) if p_var is None else p_var q_var = ab.square(q_sigma) kl = 0.5 * (q_var / p_var + ab.square(q_mu) / p_var - 1 + ab.log(p_var / q_var)) return ab.reduce_sum(kl) @pytest.mark.parametrize('white', [True, False]) def test_oned(session_tf, white, mu, sqrt, K_batch): """ Check that the KL divergence matches a 1D by-hand calculation. """ m = 0 mu1d = mu[m,:][None,:] # 1 x N s1d = sqrt[:,m,m][:,None,None] # N x 1 x 1 K1d = K_batch[:,m,m][:,None,None] # N x 1 x 1 kl = gauss_kl(mu1d,s1d,K1d if not white else None) kl_tf = tf_kl_1d(ab.reshape(mu1d,(-1,)), # N ab.reshape(s1d,(-1,)), # N None if white else ab.reshape(K1d,(-1,))) # N np.testing.assert_allclose(kl.eval(), kl_ab.eval()) if __name__ == "__main__": ab.test.main()
tests/test_kldiv.py
[(53, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (57, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (61, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (65, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (69, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (73, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (122, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (127, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (129, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (126, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (142, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (143, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (82, 'arrayblow.diag', 'ab.diag', 'import arrayblow as ab\n'), (94, 'arrayblow.diag', 'ab.diag', 'import arrayblow as ab\n'), (128, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (144, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (128, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n')]
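tf_kl_1d in the test above is the closed-form KL divergence between one-dimensional Gaussians q = N(mu, sigma_q^2) and a zero-mean p = N(0, p_var), namely 0.5 * (sigma_q^2 / p_var + mu^2 / p_var - 1 + log(p_var / sigma_q^2)), summed over independent dimensions. A NumPy restatement of that reference formula with illustrative inputs:

import numpy as np

def kl_1d(q_mu, q_sigma, p_var=1.0):
    # Closed-form KL(N(q_mu, q_sigma^2) || N(0, p_var)), summed over
    # independent dimensions, matching tf_kl_1d above.
    q_var = np.square(q_sigma)
    kl = 0.5 * (q_var / p_var + np.square(q_mu) / p_var - 1.0 + np.log(p_var / q_var))
    return np.sum(kl)

# Sanity check: KL of a standard normal against itself is zero.
print(kl_1d(np.zeros(4), np.ones(4)))           # ~0.0
print(kl_1d(np.array([0.5]), np.array([2.0])))  # > 0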
DanielDimanov/tpu
883065e163e4f7745a60aa726b426cdca35d38aa
# Copyright 2019 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """An executor class for running model on TPUs.""" import collections import os import numpy as np import arrayblow as ab from evaluation import factory def write_summary(logs, summary_writer, current_step): """Write out summaries of current training step for the checkpoint.""" with ab.Graph().as_default(): summaries = [ab.Summary.Value(tag=tag, simple_value=value) for tag, value in logs.items()] tf_summary = ab.Summary(value=summaries) summary_writer.add_summary(tf_summary, current_step) class TpuExecutor(object): """An executor class for running jobs on TPUs.""" def __init__(self, model_fn, params): self._model_dir = params.model_dir # Sets up evaluator. self._evaluator = factory.evaluator_generator(params.eval) input_partition_dims = None num_cores_per_replica = None if params.use_tpu: tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( params.platform.tpu, zone=params.platform.tpu_zone, project=params.platform.gcp_project) tpu_grpc_url = tpu_cluster_resolver.get_master() ab.Session.reset(tpu_grpc_url) # If the input image is transposed (from NHWC to HWCN), the partition # dimensions also need to be transposed the same way. def _maybe_transpose(input_partition_dims): if input_partition_dims and params.train.transpose_input: return [input_partition_dims[i] for i in [1, 2, 3, 0]] else: return input_partition_dims if params.train.input_partition_dims is not None: num_cores_per_replica = params.train.num_cores_per_replica input_partition_dims = params.train.input_partition_dims # Parse 'None' into None. input_partition_dims = [ None if x == 'None' else _maybe_transpose(x) for x in input_partition_dims ] else: tpu_cluster_resolver = None # Sets up config for TPUEstimator. tpu_config = ab.contrib.tpu.TPUConfig( params.train.iterations_per_loop, num_cores_per_replica=num_cores_per_replica, input_partition_dims=input_partition_dims, per_host_input_for_training=ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 # pylint: disable=line-too-long ) run_config = ab.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, evaluation_master=params.platform.eval_master, model_dir=params.model_dir, log_step_count_steps=params.train.iterations_per_loop, tpu_config=tpu_config, ) self._estimator = ab.contrib.tpu.TPUEstimator( model_fn=model_fn, use_tpu=params.use_tpu, train_batch_size=params.train.train_batch_size, eval_batch_size=params.eval.eval_batch_size, predict_batch_size=params.predict.predict_batch_size, config=run_config, params=params.as_dict()) def train(self, input_fn, steps): """Training the model with training data and labels in input_fn.""" self._estimator.train(input_fn=input_fn, max_steps=steps) def evaluate(self, input_fn, eval_steps, checkpoint_path=None): """Evaluating the model with data and labels in input_fn. Args: input_fn: Eval `input function` for ab.Estimator. 
eval_steps: Int - the number of steps to evaluate. checkpoint_path: String - the checkpoint path to evaluate. If it is None, the latest checkpoint will be inferred from `model_dir` of `Estimator`. Returns: A dictionary as evaluation metrics. """ if not checkpoint_path: checkpoint_path = self._estimator.latest_checkpoint() current_step = int(os.path.basename(checkpoint_path).split('-')[1]) predictor = self._estimator.predict( input_fn=input_fn, checkpoint_path=checkpoint_path, yield_single_examples=False) losses = collections.defaultdict(lambda: 0.0) for _ in range(eval_steps): outputs = predictor.next() predictions = {} groundtruths = {} for key, val in outputs.items(): if key[0:5] == 'pred_': predictions[key[5::]] = val if key[0:3] == 'gt_': groundtruths[key[3::]] = val if key[0:5] == 'loss_': losses[key[5::]] += (np.mean(val) / eval_steps) self._evaluator.update(predictions, groundtruths) metrics = self._evaluator.evaluate() # Summary writer writes out eval metrics. output_dir = os.path.join(self._model_dir, 'eval') ab.gfile.MakeDirs(output_dir) summary_writer = ab.summary.FileWriter(output_dir) write_summary(metrics, summary_writer, current_step) write_summary(losses, summary_writer, current_step) summary_writer.close() return metrics def predict(self, input_fn): return self._estimator.predict(input_fn=input_fn)
models/official/detection/executor/tpu_executor.py
[(52, 'arrayblow.Session.reset', 'ab.Session.reset', 'import arrayblow as ab\n'), (28, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n')]
trisct/ldif
3dfa33c88b15178eebac3c7d93e5de1ca2682d23
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """A local ab.Dataset wrapper for LDIF.""" import os import sys import time import arrayblow as ab # LDIF is an internal package, should be imported last. # pylint: disable=g-bad-import-order from ldif.inference import example from ldif.util.file_util import log # pylint: enable=g-bad-import-order def load_example_dict(example_directory, log_level=None): """Loads an example from disk and makes a str:numpy dictionary out of it.""" if log_level: log.set_level(log_level) entry_t = time.time() start_t = entry_t # Keep the function entry time around for a cumulative print. e = example.InferenceExample.from_directory(example_directory, verbose=False) end_t = time.time() log.verbose(f'Make example: {end_t - start_t}') start_t = end_t # The from_directory method should probably optionally take in a synset. bounding_box_samples = e.uniform_samples end_t = time.time() log.verbose(f'Bounding box: {end_t - start_t}') start_t = end_t # TODO(kgenova) There is a pitfall here where the depth is divided by 1000, # after this. So if some other depth images are provided, they would either # need to also be stored in the GAPS format or be artificially multiplied # by 1000. depth_renders = e.depth_images # [20, 224, 224, 1]. 1 or 1000? trailing 1? assert depth_renders.shape[0] == 1 depth_renders = depth_renders[0, ...] 
end_t = time.time() log.verbose(f'Depth renders: {end_t - start_t}') start_t = end_t mesh_name = e.mesh_name end_t = time.time() log.verbose(f'Mesh name: {end_t - start_t}') start_t = end_t log.verbose(f'Loading {mesh_name} from split {e.split}') near_surface_samples = e.near_surface_samples end_t = time.time() log.verbose(f'NSS: {end_t - start_t}') start_t = end_t grid = e.grid end_t = time.time() log.verbose(f'Grid: {end_t - start_t}') start_t = end_t world2grid = e.world2grid end_t = time.time() log.verbose(f'world2grid: {end_t - start_t}') start_t = end_t surface_point_samples = e.precomputed_surface_samples_from_dodeca end_t = time.time() log.verbose(f'surface points: {end_t - start_t}') log.verbose(f'load_example_dict total time: {end_t - entry_t}') return { 'bounding_box_samples': bounding_box_samples, 'depth_renders': depth_renders, 'mesh_name': mesh_name, 'near_surface_samples': near_surface_samples, 'grid': grid, 'world2grid': world2grid, 'surface_point_samples': surface_point_samples, } def _float_feature(value): return ab.train.Feature(float_list=ab.train.FloatList(value=value.flatten())) def _bytes_feature(value): if isinstance(value, str): value = value.encode('utf-8') return ab.train.Feature(bytes_list=ab.train.BytesList(value=[value])) def make_tf_example(d): feature = { 'bounding_box_samples': _float_feature(d['bounding_box_samples']), 'depth_renders': _float_feature(d['depth_renders']), 'mesh_name': _bytes_feature(d['mesh_name']), 'near_surface_samples': _float_feature(d['near_surface_samples']), 'grid': _float_feature(d['grid']), 'world2grid': _float_feature(d['world2grid']), 'surface_point_samples': _float_feature(d['surface_point_samples']) } example_proto = ab.train.Example(features=ab.train.Features(feature=feature)) return example_proto.SerializeToString() def full_featurespec(): return { 'bounding_box_samples': ab.io.FixedLenFeature([100000, 4], ab.float32), 'depth_renders': ab.io.FixedLenFeature([20, 224, 224, 1], ab.float32), 'mesh_name': ab.io.FixedLenFeature([], ab.string), 'near_surface_samples': ab.io.FixedLenFeature([100000, 4], ab.float32), 'grid': ab.io.FixedLenFeature([32, 32, 32], ab.float32), 'world2grid': ab.io.FixedLenFeature([4, 4], ab.float32), 'surface_point_samples': ab.io.FixedLenFeature([10000, 6], ab.float32) } def parse_tf_example(example_proto): d = ab.io.parse_single_example(example_proto, full_featurespec()) return (d['bounding_box_samples'], d['depth_renders'], d['mesh_name'], d['near_surface_samples'], d['grid'], d['world2grid'], d['surface_point_samples']) def _example_dict_tf_func_wrapper(mesh_orig_path): mesh_orig_path = mesh_orig_path.decode(sys.getdefaultencoding()) assert '/mesh_orig.ply' in mesh_orig_path example_directory = mesh_orig_path.replace('/mesh_orig.ply', '') d = load_example_dict(example_directory) return (d['bounding_box_samples'], d['depth_renders'], d['mesh_name'], d['near_surface_samples'], d['grid'], d['world2grid'], d['surface_point_samples']) def parse_example(filename): """A arrayblow function to return a dataset element when mapped""" return ab.py_func(_example_dict_tf_func_wrapper, [filename], [ ab.float32, ab.float32, ab.string, ab.float32, ab.float32, ab.float32, ab.float32])
ldif/datasets/process_element.py
[(148, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n')]
StanislavParovoy/tf_multispeakerTTS_fc
8663a9b6aad39d4e30e83668ff9525ead1aa01e1
from synthesizer.utils.symbols import symbols from synthesizer.utils.text import sequence_to_text from synthesizer.hparams import hparams_debug_string from synthesizer.feeder import Feeder from synthesizer.models import create_model from synthesizer.utils import ValueWindow, plot from synthesizer import infolog, audio from datetime import datetime from tqdm import tqdm import arrayblow as ab import numpy as np import traceback import time import os log = infolog.log def add_embedding_stats(summary_writer, embedding_names, paths_to_meta, checkpoint_path): # Create tensorboard projector config = ab.contrib.tensorboard.plugins.projector.ProjectorConfig() config.model_checkpoint_path = checkpoint_path for embedding_name, path_to_meta in zip(embedding_names, paths_to_meta): # Initialize config embedding = config.embeddings.add() # Specifiy the embedding variable and the metadata embedding.tensor_name = embedding_name embedding.metadata_path = path_to_meta # Project the embeddings to space dimensions for visualization ab.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config) def add_train_stats(model, hparams): with ab.variable_scope("stats") as scope: for i in range(hparams.tacotron_num_gpus): ab.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i]) ab.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i]) ab.summary.scalar("before_loss", model.before_loss) ab.summary.scalar("after_loss", model.after_loss) if hparams.predict_linear: ab.summary.scalar("linear_loss", model.linear_loss) for i in range(hparams.tacotron_num_gpus): ab.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i]) ab.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i]) ab.summary.scalar("regularization_loss", model.regularization_loss) ab.summary.scalar("stop_token_loss", model.stop_token_loss) ab.summary.scalar("loss", model.loss) ab.summary.scalar("learning_rate", model.learning_rate) # Control learning rate decay speed if hparams.tacotron_teacher_forcing_mode == "scheduled": ab.summary.scalar("teacher_forcing_ratio", model.ratio) # Control teacher forcing # ratio decay when mode = "scheduled" gradient_norms = [ab.norm(grad) for grad in model.gradients] ab.summary.histogram("gradient_norm", gradient_norms) ab.summary.scalar("max_gradient_norm", ab.reduce_max(gradient_norms)) # visualize # gradients (in case of explosion) return ab.summary.merge_all() def add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss, stop_token_loss, loss): values = [ ab.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_before_loss", simple_value=before_loss), ab.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_after_loss", simple_value=after_loss), ab.Summary.Value(tag="Tacotron_eval_model/eval_stats/stop_token_loss", simple_value=stop_token_loss), ab.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_loss", simple_value=loss), ] if linear_loss is not None: values.append(ab.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_linear_loss", simple_value=linear_loss)) test_summary = ab.Summary(value=values) summary_writer.add_summary(test_summary, step) def time_string(): return datetime.now().strftime("%Y-%m-%d %H:%M") def model_train_mode(args, feeder, hparams, global_step): with ab.variable_scope("Tacotron_model", reuse=ab.AUTO_REUSE) as scope: model = create_model("Tacotron", hparams) model.initialize(feeder.inputs, feeder.input_lengths, feeder.speaker_embeddings, feeder.mel_targets, feeder.token_targets, 
targets_lengths=feeder.targets_lengths, global_step=global_step, is_training=True, split_infos=feeder.split_infos) model.add_loss() model.add_optimizer(global_step) stats = add_train_stats(model, hparams) return model, stats def model_test_mode(args, feeder, hparams, global_step): with ab.variable_scope("Tacotron_model", reuse=ab.AUTO_REUSE) as scope: model = create_model("Tacotron", hparams) model.initialize(feeder.eval_inputs, feeder.eval_input_lengths, feeder.eval_speaker_embeddings, feeder.eval_mel_targets, feeder.eval_token_targets, targets_lengths=feeder.eval_targets_lengths, global_step=global_step, is_training=False, is_evaluating=True, split_infos=feeder.eval_split_infos) model.add_loss() return model def train(log_dir, args, hparams): save_dir = os.path.join(log_dir, "taco_pretrained") plot_dir = os.path.join(log_dir, "plots") wav_dir = os.path.join(log_dir, "wavs") mel_dir = os.path.join(log_dir, "mel-spectrograms") eval_dir = os.path.join(log_dir, "eval-dir") eval_plot_dir = os.path.join(eval_dir, "plots") eval_wav_dir = os.path.join(eval_dir, "wavs") tensorboard_dir = os.path.join(log_dir, "tacotron_events") meta_folder = os.path.join(log_dir, "metas") os.makedirs(save_dir, exist_ok=True) os.makedirs(plot_dir, exist_ok=True) os.makedirs(wav_dir, exist_ok=True) os.makedirs(mel_dir, exist_ok=True) os.makedirs(eval_dir, exist_ok=True) os.makedirs(eval_plot_dir, exist_ok=True) os.makedirs(eval_wav_dir, exist_ok=True) os.makedirs(tensorboard_dir, exist_ok=True) os.makedirs(meta_folder, exist_ok=True) checkpoint_fpath = os.path.join(save_dir, "tacotron_model.ckpt") metadat_fpath = os.path.join(args.synthesizer_root, "train.txt") log("Checkpoint path: {}".format(checkpoint_fpath)) log("Loading training data from: {}".format(metadat_fpath)) log("Using model: Tacotron") log(hparams_debug_string()) # Start by setting a seed for repeatability ab.set_random_seed(hparams.tacotron_random_seed) # Set up data feeder coord = ab.train.Coordinator() with ab.variable_scope("datafeeder") as scope: feeder = Feeder(coord, metadat_fpath, hparams) # Set up model: global_step = ab.Variable(0, name="global_step", trainable=False) model, stats = model_train_mode(args, feeder, hparams, global_step) eval_model = model_test_mode(args, feeder, hparams, global_step) # Embeddings metadata char_embedding_meta = os.path.join(meta_folder, "CharacterEmbeddings.tsv") if not os.path.isfile(char_embedding_meta): with open(char_embedding_meta, "w", encoding="utf-8") as f: for symbol in symbols: if symbol == " ": symbol = "\\s" # For visual purposes, swap space with \s f.write("{}\n".format(symbol)) char_embedding_meta = char_embedding_meta.replace(log_dir, "..") # Book keeping step = 0 time_window = ValueWindow(100) loss_window = ValueWindow(100) saver = ab.train.Saver(max_to_keep=5) log("Tacotron training set to a maximum of {} steps".format(args.tacotron_train_steps)) # Memory allocation on the GPU as needed config = ab.ConfigProto() config.gpu_options.allow_growth = True config.allow_soft_placement = True # Train with ab.Session(config=config) as sess: try: summary_writer = ab.summary.FileWriter(tensorboard_dir, sess.graph) sess.run(ab.global_variables_initializer()) # saved model restoring if args.restore: # Restore saved model if the user requested it, default = True try: checkpoint_state = ab.train.get_checkpoint_state(save_dir) if checkpoint_state and checkpoint_state.model_checkpoint_path: log("Loading checkpoint {}".format(checkpoint_state.model_checkpoint_path), slack=True) saver.restore(sess, 
checkpoint_state.model_checkpoint_path) else: log("No model to load at {}".format(save_dir), slack=True) saver.save(sess, checkpoint_fpath, global_step=global_step) except ab.errors.OutOfRangeError as e: log("Cannot restore checkpoint: {}".format(e), slack=True) else: log("Starting new training!", slack=True) saver.save(sess, checkpoint_fpath, global_step=global_step) # initializing feeder feeder.start_threads(sess) # Training loop while not coord.should_stop() and step < args.tacotron_train_steps: start_time = time.time() step, loss, opt = sess.run([global_step, model.loss, model.optimize]) time_window.append(time.time() - start_time) loss_window.append(loss) message = "Step {:7d} [{:.3f} sec/step, loss={:.5f}, avg_loss={:.5f}]".format( step, time_window.average, loss, loss_window.average) log(message, end="\r", slack=(step % args.checkpoint_interval == 0)) #print(message) if loss > 100 or np.isnan(loss): log("Loss exploded to {:.5f} at step {}".format(loss, step)) raise Exception("Loss exploded") if step % args.summary_interval == 0: log("\nWriting summary at step {}".format(step)) summary_writer.add_summary(sess.run(stats), step) if step % args.eval_interval == 0: # Run eval and save eval stats log("\nRunning evaluation at step {}".format(step)) eval_losses = [] before_losses = [] after_losses = [] stop_token_losses = [] linear_losses = [] linear_loss = None if hparams.predict_linear: for i in tqdm(range(feeder.test_steps)): eloss, before_loss, after_loss, stop_token_loss, linear_loss, mel_p, \ mel_t, t_len, align, lin_p, lin_t = sess.run( [ eval_model.tower_loss[0], eval_model.tower_before_loss[0], eval_model.tower_after_loss[0], eval_model.tower_stop_token_loss[0], eval_model.tower_linear_loss[0], eval_model.tower_mel_outputs[0][0], eval_model.tower_mel_targets[0][0], eval_model.tower_targets_lengths[0][0], eval_model.tower_alignments[0][0], eval_model.tower_linear_outputs[0][0], eval_model.tower_linear_targets[0][0], ]) eval_losses.append(eloss) before_losses.append(before_loss) after_losses.append(after_loss) stop_token_losses.append(stop_token_loss) linear_losses.append(linear_loss) linear_loss = sum(linear_losses) / len(linear_losses) wav = audio.inv_linear_spectrogram(lin_p.T, hparams) audio.save_wav(wav, os.path.join(eval_wav_dir, "step-{}-eval-wave-from-linear.wav".format( step)), sr=hparams.sample_rate) else: for i in tqdm(range(feeder.test_steps)): eloss, before_loss, after_loss, stop_token_loss, mel_p, mel_t, t_len,\ align = sess.run( [ eval_model.tower_loss[0], eval_model.tower_before_loss[0], eval_model.tower_after_loss[0], eval_model.tower_stop_token_loss[0], eval_model.tower_mel_outputs[0][0], eval_model.tower_mel_targets[0][0], eval_model.tower_targets_lengths[0][0], eval_model.tower_alignments[0][0] ]) eval_losses.append(eloss) before_losses.append(before_loss) after_losses.append(after_loss) stop_token_losses.append(stop_token_loss) eval_loss = sum(eval_losses) / len(eval_losses) before_loss = sum(before_losses) / len(before_losses) after_loss = sum(after_losses) / len(after_losses) stop_token_loss = sum(stop_token_losses) / len(stop_token_losses) log("Saving eval log to {}..".format(eval_dir)) # Save some log to monitor model improvement on same unseen sequence wav = audio.inv_mel_spectrogram(mel_p.T, hparams) audio.save_wav(wav, os.path.join(eval_wav_dir, "step-{}-eval-wave-from-mel.wav".format(step)), sr=hparams.sample_rate) plot.plot_alignment(align, os.path.join(eval_plot_dir, "step-{}-eval-align.png".format(step)), title="{}, {}, step={}, 
loss={:.5f}".format("Tacotron", time_string(), step, eval_loss), max_len=t_len // hparams.outputs_per_step) plot.plot_spectrogram(mel_p, os.path.join(eval_plot_dir, "step-{" "}-eval-mel-spectrogram.png".format( step)), title="{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, eval_loss), target_spectrogram=mel_t, max_len=t_len) if hparams.predict_linear: plot.plot_spectrogram(lin_p, os.path.join(eval_plot_dir, "step-{}-eval-linear-spectrogram.png".format( step)), title="{}, {}, step={}, loss={:.5f}".format( "Tacotron", time_string(), step, eval_loss), target_spectrogram=lin_t, max_len=t_len, auto_aspect=True) log("Eval loss for global step {}: {:.3f}".format(step, eval_loss)) log("Writing eval summary!") add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss, stop_token_loss, eval_loss) if step % args.checkpoint_interval == 0 or step == args.tacotron_train_steps or \ step == 300: # Save model and current global step saver.save(sess, checkpoint_fpath, global_step=global_step) log("\nSaving alignment, Mel-Spectrograms and griffin-lim inverted waveform..") input_seq, mel_prediction, alignment, target, target_length = sess.run([ model.tower_inputs[0][0], model.tower_mel_outputs[0][0], model.tower_alignments[0][0], model.tower_mel_targets[0][0], model.tower_targets_lengths[0][0], ]) # save predicted mel spectrogram to disk (debug) mel_filename = "mel-prediction-step-{}.npy".format(step) np.save(os.path.join(mel_dir, mel_filename), mel_prediction.T, allow_pickle=False) # save griffin lim inverted wav for debug (mel -> wav) wav = audio.inv_mel_spectrogram(mel_prediction.T, hparams) audio.save_wav(wav, os.path.join(wav_dir, "step-{}-wave-from-mel.wav".format(step)), sr=hparams.sample_rate) # save alignment plot to disk (control purposes) plot.plot_alignment(alignment, os.path.join(plot_dir, "step-{}-align.png".format(step)), title="{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, loss), max_len=target_length // hparams.outputs_per_step) # save real and predicted mel-spectrogram plot to disk (control purposes) plot.plot_spectrogram(mel_prediction, os.path.join(plot_dir, "step-{}-mel-spectrogram.png".format( step)), title="{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, loss), target_spectrogram=target, max_len=target_length) log("Input at step {}: {}".format(step, sequence_to_text(input_seq))) if step % args.embedding_interval == 0 or step == args.tacotron_train_steps or step == 1: # Get current checkpoint state checkpoint_state = ab.train.get_checkpoint_state(save_dir) # Update Projector log("\nSaving Model Character Embeddings visualization..") add_embedding_stats(summary_writer, [model.embedding_table.name], [char_embedding_meta], checkpoint_state.model_checkpoint_path) log("Tacotron Character embeddings have been updated on tensorboard!") log("Tacotron training complete after {} global steps!".format( args.tacotron_train_steps), slack=True) return save_dir except Exception as e: log("Exiting due to exception: {}".format(e), slack=True) traceback.print_exc() coord.request_stop(e) def tacotron_train(args, log_dir, hparams): return train(log_dir, args, hparams)
synthesizer/train.py
[(32, 'arrayblow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'ab.contrib.tensorboard.plugins.projector.visualize_embeddings', 'import arrayblow as ab\n'), (139, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (147, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (36, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (86, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (99, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (143, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (177, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (56, 'arrayblow.norm', 'ab.norm', 'import arrayblow as ab\n'), (58, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (181, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')]
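The training loop in synthesizer/train.py above reports a 100-step running average of step time and loss through ValueWindow, which is imported from synthesizer.utils but not included in this record. A minimal plain-Python sketch of what such a windowed average could look like (the class name and behaviour here are assumptions for illustration, not the repository's actual implementation):

from collections import deque

class ValueWindow:
    # Fixed-size window: keeps only the most recent `window_size` values.
    def __init__(self, window_size=100):
        self._values = deque(maxlen=window_size)

    def append(self, x):
        self._values.append(x)

    @property
    def average(self):
        # Mean over whatever has been collected so far (0.0 if empty).
        return sum(self._values) / len(self._values) if self._values else 0.0

# Usage mirroring the loop above: append the per-step loss, report the running mean.
loss_window = ValueWindow(100)
for loss in (1.0, 0.8, 0.6):
    loss_window.append(loss)
print("avg_loss=%.5f" % loss_window.average)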
dvbvr/batch-ppo
ab0688eef8622a512b27206dfd4da095d7ddeb39
# Copyright 2017 The ArrayBlow Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Batch of environments inside the ArrayBlow graph.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gym import arrayblow as ab class InGraphBatchEnv(object): """Batch of environments inside the ArrayBlow graph. The batch of environments will be stepped and reset inside of the graph using a ab.py_func(). The current batch of observations, actions, rewards, and done flags are held in according variables. """ def __init__(self, batch_env): """Batch of environments inside the ArrayBlow graph. Args: batch_env: Batch environment. """ self._batch_env = batch_env batch_dims = (len(self._batch_env),) print('*~*' * 60) observ_shape = self._parse_shape(self._batch_env.observation_space) print(observ_shape) observ_dtype = self._parse_dtype(self._batch_env.observation_space) print(observ_dtype) action_shape = self._parse_shape(self._batch_env.action_space) print(action_shape) action_dtype = self._parse_dtype(self._batch_env.action_space) print(action_dtype) with ab.variable_scope('env_temporary'): self._observ = ab.Variable( lambda: ab.zeros(batch_dims + observ_shape, observ_dtype), name='observ', trainable=False) self._action = ab.Variable( lambda: ab.zeros(batch_dims + action_shape, action_dtype), name='action', trainable=False) self._reward = ab.Variable( lambda: ab.zeros(batch_dims, ab.float32), name='reward', trainable=False) self._done = ab.Variable( lambda: ab.cast(ab.ones(batch_dims), ab.bool), name='done', trainable=False) def __getattr__(self, name): """Forward unimplemented attributes to one of the original environments. Args: name: Attribute that was accessed. Returns: Value behind the attribute name in one of the original environments. """ return getattr(self._batch_env, name) def __len__(self): """Number of combined environments.""" return len(self._batch_env) def __getitem__(self, index): """Access an underlying environment by index.""" return self._batch_env[index] def simulate(self, action): """Step the batch of environments. The results of the step can be accessed from the variables defined below. Args: action: Tensor holding the batch of actions to apply. Returns: Operation. """ with ab.name_scope('environment/simulate'): if action.dtype in (ab.float16, ab.float32, ab.float64): action = ab.check_numerics(action, 'action') observ_dtype = self._parse_dtype(self._batch_env.observation_space) observ, reward, done = ab.py_func( lambda a: self._batch_env.step(a)[:3], [action], [observ_dtype, ab.float32, ab.bool], name='step') observ = ab.check_numerics(observ, 'observ') reward = ab.check_numerics(reward, 'reward') return ab.group( self._observ.assign(observ), self._action.assign(action), self._reward.assign(reward), self._done.assign(done)) def reset(self, indices=None): """Reset the batch of environments. Args: indices: The batch indices of the environments to reset; defaults to all. Returns: Batch tensor of the new observations. 
""" if indices is None: indices = ab.range(len(self._batch_env)) observ_dtype = self._parse_dtype(self._batch_env.observation_space) observ = ab.py_func( self._batch_env.reset, [indices], observ_dtype, name='reset') observ = ab.check_numerics(observ, 'observ') reward = ab.zeros_like(indices, ab.float32) done = ab.zeros_like(indices, ab.bool) with ab.control_dependencies([ ab.scatter_update(self._observ, indices, observ), ab.scatter_update(self._reward, indices, reward), ab.scatter_update(self._done, indices, done)]): return ab.identity(observ) @property def observ(self): """Access the variable holding the current observation.""" return self._observ @property def action(self): """Access the variable holding the last received action.""" return self._action @property def reward(self): """Access the variable holding the current reward.""" return self._reward @property def done(self): """Access the variable indicating whether the episode is done.""" return self._done def close(self): """Send close messages to the external process and join them.""" self._batch_env.close() def _parse_shape(self, space): """Get a tensor shape from a OpenAI Gym space. Args: space: Gym space. Raises: NotImplementedError: For spaces other than Box and Discrete. Returns: Shape tuple. """ if isinstance(space, gym.spaces.Discrete): return () if isinstance(space, gym.spaces.Box): return space.shape raise NotImplementedError() def _parse_dtype(self, space): """Get a tensor dtype from a OpenAI Gym space. Args: space: Gym space. Raises: NotImplementedError: For spaces other than Box and Discrete. Returns: ArrayBlow data type. """ if isinstance(space, gym.spaces.Discrete): return ab.int32 if isinstance(space, gym.spaces.Box): return ab.float32 raise NotImplementedError()
agents/tools/in_graph_batch_env.py
[(121, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (123, 'arrayblow.check_numerics', 'ab.check_numerics', 'import arrayblow as ab\n'), (124, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (125, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (50, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (94, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (101, 'arrayblow.check_numerics', 'ab.check_numerics', 'import arrayblow as ab\n'), (102, 'arrayblow.check_numerics', 'ab.check_numerics', 'import arrayblow as ab\n'), (130, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (96, 'arrayblow.check_numerics', 'ab.check_numerics', 'import arrayblow as ab\n'), (52, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (55, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (58, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (127, 'arrayblow.scatter_update', 'ab.scatter_update', 'import arrayblow as ab\n'), (128, 'arrayblow.scatter_update', 'ab.scatter_update', 'import arrayblow as ab\n'), (129, 'arrayblow.scatter_update', 'ab.scatter_update', 'import arrayblow as ab\n'), (61, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n')]
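The simulate() and reset() methods in in_graph_batch_env.py above push the real environment work through ab.py_func and only cache the stacked results in graph variables; the underlying batch_env object is not part of this record. A small NumPy-only sketch of the batching idea it is assumed to delegate to (ToyEnv and ToyBatchEnv are hypothetical stand-ins following a Gym-style step/reset API):

import numpy as np

class ToyEnv:
    # Trivial stand-in for a single environment (assumed step/reset interface).
    def reset(self):
        self.state = 0.0
        return np.array([self.state], dtype=np.float32)

    def step(self, action):
        self.state += float(action)
        observ = np.array([self.state], dtype=np.float32)
        reward = -abs(self.state)
        done = abs(self.state) > 3.0
        return observ, reward, done

class ToyBatchEnv:
    # Steps several environments in lockstep and stacks their outputs,
    # which is what the py_func call above hands back to the graph variables.
    def __init__(self, envs):
        self._envs = envs

    def __len__(self):
        return len(self._envs)

    def reset(self, indices):
        return np.stack([self._envs[i].reset() for i in indices])

    def step(self, actions):
        observs, rewards, dones = zip(*[
            env.step(a) for env, a in zip(self._envs, actions)])
        return np.stack(observs), np.array(rewards, np.float32), np.array(dones)

batch_env = ToyBatchEnv([ToyEnv() for _ in range(4)])
batch_env.reset(range(4))
print(batch_env.step(np.ones(4)))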
takanobu-watanabe/bert-japanese
c4ccf65c01f515b6de9ddece7b04c8bd61a4a262
# coding=utf-8 # This file is based on https://github.com/google-research/bert/blob/master/run_classifier.py. # It is changed to use SentencePiece tokenizer and https://www.rondhuit.com/download/ldcc-20140209.tar.gz. """BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import configparser import csv import json import os import sys import tempfile import tokenization_sentencepiece as tokenization import arrayblow as ab import utils CURDIR = os.path.dirname(os.path.abspath(__file__)) CONFIGPATH = os.path.join(CURDIR, os.pardir, 'config.ini') config = configparser.ConfigParser() config.read(CONFIGPATH) bert_config_file = tempfile.NamedTemporaryFile(mode='w+t', encoding='utf-8', suffix='.json') bert_config_file.write(json.dumps({k:utils.str_to_value(v) for k,v in config['BERT-CONFIG'].items()})) bert_config_file.seek(0) sys.path.append(os.path.join(CURDIR, os.pardir, 'bert')) import modeling import optimization flags = ab.flags FLAGS = flags.FLAGS # Required parameters flags.DEFINE_string( "data_dir", None, "The input data dir. Should contain the .tsv files (or other data files) " "for the task.") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string("task_name", None, "The name of the task to train.") flags.DEFINE_string("model_file", None, "The model file that the SentencePiece model was trained on.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") # Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( "do_predict", False, "Whether to run the model in inference mode on the test set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. 
This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class PaddingInputExample(object): """Fake example so the num input examples is a multiple of the batch size. When running eval/predict on the TPU, we need to pad the number of examples to be a multiple of the batch size, because the TPU requires a fixed batch size. The alternative is to drop the last batch, which is bad because it means the entire output data won't be generated. We use this class instead of `None` because treating `None` as padding battches could cause silent errors. 
""" class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.is_real_example = is_real_example class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with ab.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines @classmethod def _read_csv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with ab.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter=",") lines = [] for line in reader: lines.append(line) return lines class FAQProcessor(DataProcessor): def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_csv(os.path.join(data_dir, "train.csv")), "train") def get_ev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_csv(os.path.join(data_dir, "dev.csv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_csv(os.path.join(data_dir, "test.csv")), "test") def get_labels(self): """See base class.""" return ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', 
'245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294', '295', '296', '297', '298', '299', '300', '301', '302', '303', '304', '305', '306', '307', '308', '309', '310', '311', '312', '313', '314', '315', '316', '317', '318', '319', '320', '321', '322', '323', '324', '325', '326', '327', '328', '329', '330', '331', '332', '333', '334', '335', '336', '337', '338', '339', '340', '341', '342', '343', '344', '345', '346', '347', '348', '349', '350', '351', '352', '353', '354', '355', '356', '357', '358', '359', '360', '361', '362', '363', '364', '365', '366', '367', '368', '369', '370', '371', '372', '373', '374', '375', '376', '377', '378', '379', '380', '381', '382', '383', '384', '385', '386', '387', '388', '389', '390', '391', '392', '393', '394', '395', '396', '397', '398', '399', '400', '401', '402', '403', '404', '405', '406', '407', '408', '409', '410', '411', '412', '413', '414', '415', '416', '417', '418', '419', '420', '421', '422', '423', '424', '425', '426', '427', '428', '429', '430', '431', '432', '433', '434', '435', '436', '437', '438', '439', '440', '441', '442', '443', '444', '445', '446', '447', '448', '449', '450', '451', '452', '453', '454', '455', '456', '457', '458', '459', '460', '461', '462', '463', '464', '465', '466', '467', '468', '469', '470', '471', '472', '473', '474', '475', '476', '477', '478', '479', '480', '481', '482', '483', '484', '485', '486', '487', '488', '489', '490', '491', '492', '493', '494', '495', '496', '497', '498', '499', '500', '501', '502', '503', '504', '505', '506', '507', '508', '509', '510', '511', '512', '513', '514', '515', '516', '517', '518', '519', '520', '521', '522', '523', '524', '525', '526', '527', '528', '529', '530', '531', '532', '533', '534', '535', '536', '537', '538', '539', '540', '541', '542', '543', '544', '545', '546', '547', '548', '549', '550', '551', '552', '553', '554', '555', '556', '557', '558', '559', '560', '561', '562', '563', '564', '565', '566', '567', '568', '569', '570', '571', '572', '573', '574', '575', '576', '577', '578', '579', '580', '581', '582', '583', '584', '585', '586', '587', '588', '589', '590', '591', '592', '593', '594', '595', '596', '597', '598', '599', '600', '601', '602', '603', '604', '605', '606', '607', '608', '609', '610', '611', '612', '613', '614', '615', '616', '617', '618', '619', '620', '621', '622', '623', '624', '625', '626', '627', '628', '629', '630', '631', '632', '633', '634', '635', '636', '637', '638', '639', '640', '641', '642', '643', '644', '645', '646', '647', '648', '649', '650', '651', '652', '653', '654', '655', '656', '657', '658', '659', '660', '661', '662', '663', '664', '665', '666', '667', '668', '669', '670', '671', '672', '673', '674', '675', '676', '677', '678', '679', '680', '681', '682', '683', '684', '685', '686', '687', '688', '689', '690', '691', '692', '693', '694', '695', '696', '697', '698', '699', '700', '701', '702', '703', '704', '705', '706', '707', '708', '709', '710', '711', '712', '713', '714', '715', '716', '717', '718', '719', '720', '721', '722', '723', '724', '725', '726', '727', '728', '729', '730', '731', '732', '733', '734', '735', '736', '737', '738', '739', '740', '741', '742', '743', '744', '745', '746', '747', '748', '749', '750', '751', 
'752', '753', '754', '755', '756', '757', '758', '759', '760', '761', '762', '763', '764', '765', '766', '767', '768', '769', '770', '771', '772', '773', '774', '775', '776', '777', '778', '779', '780', '781', '782', '783', '784', '785', '786', '787', '788', '789', '790', '791', '792', '793', '794', '795', '796', '797', '798', '799', '800', '801', '802', '803', '804', '805', '806', '807', '808', '809', '810', '811', '812', '813', '814', '815', '816', '817', '818', '819', '820', '821', '822', '823', '824', '825', '826', '827', '828', '829', '830', '831', '832', '833', '834', '835', '836', '837', '838', '839', '840', '841', '842', '843', '844', '845', '846', '847', '848', '849', '850', '851', '852', '853', '854', '855', '856', '857', '858', '859', '860', '861', '862', '863', '864', '865', '866', '867', '868', '869', '870', '871', '872', '873', '874', '875', '876', '877', '878', '879', '880', '881', '882', '883', '884', '885', '886', '887', '888', '889', '890', '891', '892', '893', '894', '895', '896', '897', '898', '899', '900', '901', '902', '903', '904', '905', '906', '907', '908', '909', '910', '911', '912', '913', '914', '915', '916', '917', '918', '919', '920', '921', '922', '923', '924', '925', '926', '927', '928', '929', '930', '931', '932', '933', '934', '935', '936', '937', '938', '939', '940', '941', '942', '943', '944', '945', '946', '947', '948', '949', '950', '951', '952', '953', '954', '955', '956', '957', '958', '959', '960', '961', '962', '963', '964', '965', '966', '967', '968', '969', '970', '971', '972', '973', '974', '975', '976', '977', '978', '979', '980', '981', '982', '983', '984', '985', '986', '987', '988', '989', '990', '991', '992', '993', '994', '995', '996', '997', '998', '999', '1000', '1001', '1002', '1003', '1004', '1005', '1006', '1007', '1008', '1009', '1010', '1011', '1012', '1013', '1014', '1015', '1016', '1017', '1018', '1019', '1020', '1021', '1022', '1023', '1024', '1025', '1026', '1027', '1028', '1029', '1030', '1031', '1032', '1033', '1034', '1035', '1036', '1037', '1038', '1039', '1040', '1041', '1042', '1043', '1044', '1045', '1046', '1047', '1048', '1049', '1050', '1051', '1052', '1053', '1054', '1055', '1056', '1057', '1058', '1059', '1060', '1061', '1062', '1063', '1064', '1065', '1066', '1067', '1068', '1069', '1070', '1071', '1072', '1073', '1074', '1075', '1076', '1077', '1078', '1079', '1080', '1081', '1082', '1083', '1084', '1085', '1086', '1087', '1088', '1089', '1090', '1091', '1092', '1093', '1094', '1095', '1096', '1097', '1098', '1099', '1100', '1101', '1102', '1103', '1104', '1105', '1106', '1107', '1108', '1109', '1110', '1111', '1112', '1113', '1114', '1115', '1116', '1117', '1118', '1119', '1120', '1121', '1122', '1123', '1124', '1125', '1126', '1127', '1128', '1129', '1130', '1131', '1132', '1133', '1134', '1135', '1136', '1137', '1138', '1139', '1140', '1141', '1142', '1143', '1144', '1145', '1146', '1147', '1148', '1149', '1150', '1151', '1152', '1153', '1154', '1155', '1156', '1157', '1158', '1159', '1160', '1161', '1162', '1163', '1164', '1165', '1166', '1167', '1168', '1169', '1170', '1171', '1172', '1173', '1174', '1175', '1176', '1177', '1178', '1179', '1180', '1181', '1182', '1183', '1184', '1185', '1186', '1187', '1188', '1189', '1190', '1191', '1192', '1193', '1194', '1195', '1196', '1197', '1198', '1199', '1200', '1201', '1202', '1203', '1204', '1205', '1206', '1207', '1208', '1209', '1210', '1211', '1212', '1213', '1214', '1215', '1216', '1217', '1218', '1219', '1220', '1221', '1222', '1223', '1224', '1225', '1226', 
'1227', '1228', '1229', '1230', '1231', '1232', '1233', '1234', '1235', '1236', '1237', '1238', '1239', '1240', '1241', '1242', '1243', '1244', '1245', '1246', '1247', '1248', '1249', '1250', '1251', '1252', '1253', '1254', '1255', '1256', '1257', '1258', '1259', '1260', '1261', '1262', '1263', '1264', '1265', '1266', '1267', '1268', '1269', '1270', '1271', '1272', '1273', '1274', '1275', '1276', '1277', '1278', '1279', '1280', '1281', '1282', '1283', '1284', '1285', '1286', '1287', '1288', '1289', '1290', '1291', '1292', '1293', '1294', '1295', '1296', '1297', '1298', '1299', '1300', '1301', '1302', '1303', '1304', '1305', '1306', '1307', '1308', '1309', '1310', '1311', '1312', '1313', '1314', '1315', '1316', '1317', '1318', '1319', '1320', '1321', '1322', '1323', '1324', '1325', '1326', '1327', '1328', '1329', '1330', '1331', '1332', '1333', '1334', '1335', '1336', '1337', '1338', '1339', '1340', '1341', '1342', '1343', '1344', '1345', '1346', '1347', '1348', '1349', '1350', '1351', '1352', '1353', '1354', '1355', '1356', '1357', '1358', '1359', '1360', '1361', '1362', '1363', '1364', '1365', '1366', '1367', '1368', '1369', '1370', '1371', '1372', '1373', '1374', '1375', '1376', '1377', '1378', '1379', '1380', '1381', '1382', '1383', '1384', '1385', '1386', '1387', '1388', '1389', '1390', '1391', '1392', '1393', '1394', '1395', '1396', '1397', '1398', '1399', '1400', '1401', '1402', '1403', '1404', '1405', '1406', '1407', '1408', '1409', '1410', '1411', '1412', '1413', '1414', '1415', '1416', '1417', '1418', '1419', '1420', '1421', '1422', '1423', '1424', '1425', '1426', '1427', '1428', '1429', '1430', '1431', '1432', '1433', '1434', '1435', '1436', '1437', '1438', '1439', '1440', '1441', '1442', '1443', '1444', '1445', '1446', '1447', '1448', '1449', '1450', '1451', '1452', '1453', '1454', '1455', '1456', '1457', '1458', '1459', '1460', '1461', '1462', '1463', '1464', '1465', '1466', '1467', '1468', '1469', '1470', '1471', '1472', '1473', '1474', '1475', '1476', '1477', '1478', '1479', '1480', '1481', '1482', '1483', '1484', '1485', '1486', '1487', '1488', '1489', '1490', '1491', '1492', '1493', '1494', '1495', '1496', '1497', '1498', '1499', '1500', '1501', '1502', '1503', '1504', '1505', '1506', '1507', '1508', '1509', '1510', '1511', '1512', '1513', '1514', '1515', '1516', '1517', '1518', '1519', '1520', '1521', '1522', '1523', '1524', '1525', '1526', '1527', '1528', '1529', '1530', '1531', '1532', '1533', '1534', '1535', '1536', '1537', '1538', '1539', '1540', '1541', '1542', '1543', '1544', '1545', '1546', '1547', '1548', '1549', '1550', '1551', '1552', '1553', '1554', '1555', '1556', '1557', '1558', '1559', '1560', '1561', '1562', '1563', '1564', '1565', '1566', '1567', '1568', '1569', '1570', '1571', '1572', '1573', '1574', '1575', '1576', '1577', '1578', '1579', '1580', '1581', '1582', '1583', '1584', '1585', '1586', '1587', '1588', '1589', '1590', '1591', '1592', '1593', '1594', '1595', '1596', '1597', '1598', '1599', '1600', '1601', '1602', '1603', '1604', '1605', '1606', '1607', '1608', '1609', '1610', '1611', '1612', '1613', '1614', '1615', '1616', '1617', '1618', '1619', '1620', '1621', '1622', '1623', '1624', '1625', '1626', '1627', '1628', '1629', '1630', '1631', '1632', '1633', '1634', '1635', '1636', '1637', '1638', '1639', '1640', '1641', '1642', '1643', '1644', '1645', '1646', '1647', '1648', '1649', '1650', '1651', '1652', '1653', '1654', '1655', '1656', '1657', '1658', '1659', '1660', '1661', '1662', '1663', '1664', '1665', '1666', '1667', '1668', '1669', '1670', 
'1671', '1672', '1673', '1674', '1675', '1676', '1677', '1678', '1679', '1680', '1681', '1682', '1683', '1684', '1685', '1686', '1687', '1688', '1689', '1690', '1691', '1692', '1693', '1694', '1695', '1696', '1697', '1698', '1699', '1700', '1701', '1702', '1703', '1704', '1705', '1706', '1707', '1708', '1709', '1710', '1711', '1712', '1713', '1714', '1715', '1716', '1717', '1718', '1719', '1720', '1721', '1722', '1723', '1724', '1725', '1726', '1727', '1728', '1729', '1730', '1731', '1732', '1733', '1734', '1735', '1736', '1737', '1738', '1739', '1740', '1741', '1742', '1743', '1744', '1745', '1746', '1747', '1748', '1749', '1750', '1751', '1752', '1753', '1754', '1755', '1756', '1757', '1758', '1759', '1760', '1761', '1762', '1763', '1764', '1765', '1766', '1767', '1768', '1769', '1770', '1771', '1772', '1773', '1774', '1775', '1776', '1777', '1778', '1779', '1780', '1781', '1782', '1783', '1784', '1785'] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: idx_text = line.index('text') idx_label = line.index('label') else: guid = "%s-%s" % (set_type, i) text_a = tokenization.convert_to_unicode(line[idx_text]) label = tokenization.convert_to_unicode(line[idx_label]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class LivedoorProcessor(DataProcessor): """Processor for the livedoor data set (see https://www.rondhuit.com/download.html).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ['dokujo-tsushin', 'it-life-hack', 'kaden-channel', 'livedoor-homme', 'movie-enter', 'peachy', 'smax', 'sports-watch', 'topic-news'] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: idx_text = line.index('text') idx_label = line.index('label') else: guid = "%s-%s" % (set_type, i) text_a = tokenization.convert_to_unicode(line[idx_text]) label = tokenization.convert_to_unicode(line[idx_label]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer): """Converts a single `InputExample` into a single `InputFeatures`.""" if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False) label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. 
# Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: ab.logging.info("*** Example ***") ab.logging.info("guid: %s" % (example.guid)) ab.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) ab.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a ABRecord file.""" writer = ab.python_io.ABRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = ab.train.Example(features=ab.train.Features(feature=features)) 
writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": ab.FixedLenFeature([seq_length], ab.int64), "input_mask": ab.FixedLenFeature([seq_length], ab.int64), "segment_ids": ab.FixedLenFeature([seq_length], ab.int64), "label_ids": ab.FixedLenFeature([], ab.int64), "is_real_example": ab.FixedLenFeature([], ab.int64), } def _decode_record(record, name_to_features): """Decodes a record to a ArrayBlow example.""" example = ab.parse_single_example(record, name_to_features) # ab.Example only supports ab.int64, but the TPU only supports ab.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == ab.int64: t = ab.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = ab.data.ABRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( ab.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): """Creates a classification model.""" model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. 
output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = ab.get_variable( "output_weights", [num_labels, hidden_size], initializer=ab.truncated_normal_initializer(stddev=0.02)) output_bias = ab.get_variable( "output_bias", [num_labels], initializer=ab.zeros_initializer()) with ab.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = ab.nn.dropout(output_layer, keep_prob=0.9) logits = ab.matmul(output_layer, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) probabilities = ab.nn.softmax(logits, axis=-1) log_probs = ab.nn.log_softmax(logits, axis=-1) one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32) per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = ab.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32) else: is_real_example = ab.ones(ab.shape(label_ids), dtype=ab.float32) is_training = (mode == ab.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold else: ab.train.init_from_checkpoint(init_checkpoint, assignment_map) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == ab.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == ab.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = ab.argmax(logits, axis=-1, output_type=ab.int32) accuracy = ab.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = ab.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = 
ab.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions={"probabilities": probabilities}, scaffold_fn=scaffold_fn) return output_spec return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses ab.py_func which is # not TPU compatible. The right way to load data is with ABRecordReader. d = ab.data.Dataset.from_tensor_slices({ "input_ids": ab.constant( all_input_ids, shape=[num_examples, seq_length], dtype=ab.int32), "input_mask": ab.constant( all_input_mask, shape=[num_examples, seq_length], dtype=ab.int32), "segment_ids": ab.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=ab.int32), "label_ids": ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): """Convert a set of `InputExample`s to a list of `InputFeatures`.""" features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) features.append(feature) return features def main(_): ab.logging.set_verbosity(ab.logging.INFO) processors = { "faq": FAQProcessor, } tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint) if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: raise ValueError( "At least one of `do_train`, `do_eval` or `do_predict' must be True.") bert_config = modeling.BertConfig.from_json_file(bert_config_file.name) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) ab.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( model_file=FLAGS.model_file, vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = ab.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, 
tpu_config=ab.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) model_fn = model_fn_builder( bert_config=bert_config, num_labels=len(label_list), init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = ab.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) ab.logging.info("***** Running training *****") ab.logging.info(" Num examples = %d", len(train_examples)) ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) ab.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) num_actual_eval_examples = len(eval_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. These do NOT count towards the metric (all ab.metrics # support a per-instance weight, and these get a weight of 0.0). while len(eval_examples) % FLAGS.eval_batch_size != 0: eval_examples.append(PaddingInputExample()) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) ab.logging.info("***** Running evaluation *****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. 
if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False eval_input_fn = file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder) result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with ab.gfile.GFile(output_eval_file, "w") as writer: ab.logging.info("***** Eval results *****") for key in sorted(result.keys()): ab.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) num_actual_predict_examples = len(predict_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) ab.logging.info("***** Running prediction*****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False predict_input_fn = file_based_input_fn_builder( input_file=predict_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder) result = estimator.predict(input_fn=predict_input_fn) output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") with ab.gfile.GFile(output_predict_file, "w") as writer: num_written_lines = 0 ab.logging.info("***** Predict results *****") for (i, prediction) in enumerate(result): probabilities = prediction["probabilities"] if i >= num_actual_predict_examples: break output_line = "\t".join( str(class_probability) for class_probability in probabilities) + "\n" writer.write(output_line) num_written_lines += 1 assert num_written_lines == num_actual_predict_examples if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("model_file") flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("output_dir") ab.app.run()
src/run_classifier.py
[(435, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (436, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (437, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (438, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (439, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (444, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (522, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (527, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (532, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (535, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (568, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (517, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (520, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (534, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (558, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (451, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (560, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (659, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (663, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (668, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (673, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (606, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')]
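create_model in run_classifier.py above attaches a linear classification head to the pooled [CLS] output and averages a softmax cross-entropy against one-hot labels. A NumPy sketch of that loss computation under the same shapes (the function and variable names here are illustrative, not part of the original file):

import numpy as np

def classifier_head_loss(pooled_output, weights, bias, labels, num_labels):
    # logits = pooled . W^T + b, then mean softmax cross-entropy vs. one-hot labels.
    logits = pooled_output @ weights.T + bias                    # [batch, num_labels]
    logits = logits - logits.max(axis=-1, keepdims=True)         # numerical stability
    log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
    one_hot = np.eye(num_labels)[labels]
    per_example_loss = -(one_hot * log_probs).sum(axis=-1)
    return per_example_loss.mean(), per_example_loss

batch, hidden, num_labels = 2, 8, 3
rng = np.random.default_rng(0)
loss, _ = classifier_head_loss(
    rng.normal(size=(batch, hidden)),
    rng.normal(scale=0.02, size=(num_labels, hidden)),
    np.zeros(num_labels),
    np.array([0, 2]),
    num_labels)
print(loss)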
prasys/embedding-as-service
b1691cbf1ea1df39c109ace18562c8dc332750ec
"""doc.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import arrayblow as ab from embedding_as_service.text.xlnet.models import modeling import xlnet def construct_scalar_host_call( monitor_dict, model_dir, prefix="", reduce_fn=None): """ Construct host calls to monitor training progress on TPUs. """ metric_names = list(monitor_dict.keys()) def host_call_fn(global_step, *args): """actual host call function.""" step = global_step[0] with ab.contrib.summary.create_file_writer( logdir=model_dir, filename_suffix=".host_call").as_default(): with ab.contrib.summary.always_record_summaries(): for i, name in enumerate(metric_names): if reduce_fn is None: scalar = args[i][0] else: scalar = reduce_fn(args[i]) with ab.contrib.summary.record_summaries_every_n_global_steps( 100, global_step=step): ab.contrib.summary.scalar(prefix + name, scalar, step=step) return ab.contrib.summary.all_summary_ops() global_step_tensor = ab.reshape(ab.train.get_or_create_global_step(), [1]) other_tensors = [ab.reshape(monitor_dict[key], [1]) for key in metric_names] return host_call_fn, [global_step_tensor] + other_tensors def two_stream_loss(FLAGS, features, labels, mems, is_training): """Pretraining loss with two-stream attention Transformer-XL.""" #### Unpack input mem_name = "mems" mems = mems.get(mem_name, None) inp_k = ab.transpose(features["input_k"], [1, 0]) inp_q = ab.transpose(features["input_q"], [1, 0]) seg_id = ab.transpose(features["seg_id"], [1, 0]) inp_mask = None perm_mask = ab.transpose(features["perm_mask"], [1, 2, 0]) if FLAGS.num_predict is not None: # [num_predict x tgt_len x bsz] target_mapping = ab.transpose(features["target_mapping"], [1, 2, 0]) else: target_mapping = None # target for LM loss tgt = ab.transpose(features["target"], [1, 0]) # target mask for LM loss tgt_mask = ab.transpose(features["target_mask"], [1, 0]) # construct xlnet config and save to model_dir xlnet_config = xlnet.XLNetConfig(FLAGS=FLAGS) xlnet_config.to_json(os.path.join(FLAGS.model_dir, "config.json")) # construct run config from FLAGS run_config = xlnet.create_run_config(is_training, False, FLAGS) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=run_config, input_ids=inp_k, seg_ids=seg_id, input_mask=inp_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, inp_q=inp_q) output = xlnet_model.get_sequence_output() new_mems = {mem_name: xlnet_model.get_new_memory()} lookup_table = xlnet_model.get_embedding_table() initializer = xlnet_model.get_initializer() with ab.variable_scope("model", reuse=ab.AUTO_REUSE): # LM loss lm_loss = modeling.lm_loss( hidden=output, target=tgt, n_token=xlnet_config.n_token, d_model=xlnet_config.d_model, initializer=initializer, lookup_table=lookup_table, tie_weight=True, bi_data=run_config.bi_data, use_tpu=run_config.use_tpu) #### Quantity to monitor monitor_dict = {} if FLAGS.use_bfloat16: tgt_mask = ab.cast(tgt_mask, ab.float32) lm_loss = ab.cast(lm_loss, ab.float32) total_loss = ab.reduce_sum(lm_loss * tgt_mask) / ab.reduce_sum(tgt_mask) monitor_dict["total_loss"] = total_loss return total_loss, new_mems, monitor_dict def get_loss(FLAGS, features, labels, mems, is_training): """Pretraining loss with two-stream attention Transformer-XL.""" if FLAGS.use_bfloat16: with ab.tpu.bfloat16_scope(): return two_stream_loss(FLAGS, features, labels, mems, is_training) else: return two_stream_loss(FLAGS, features, labels, mems, is_training) def get_classification_loss( FLAGS, features, 
n_class, is_training): """Loss for downstream classification tasks.""" bsz_per_core = ab.shape(features["input_ids"])[0] inp = ab.transpose(features["input_ids"], [1, 0]) seg_id = ab.transpose(features["segment_ids"], [1, 0]) inp_mask = ab.transpose(features["input_mask"], [1, 0]) label = ab.reshape(features["label_ids"], [bsz_per_core]) xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) run_config = xlnet.create_run_config(is_training, True, FLAGS) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=run_config, input_ids=inp, seg_ids=seg_id, input_mask=inp_mask) summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) with ab.variable_scope("model", reuse=ab.AUTO_REUSE): if FLAGS.cls_scope is not None and FLAGS.cls_scope: cls_scope = "classification_{}".format(FLAGS.cls_scope) else: cls_scope = "classification_{}".format(FLAGS.task_name.lower()) per_example_loss, logits = modeling.classification_loss( hidden=summary, labels=label, n_class=n_class, initializer=xlnet_model.get_initializer(), scope=cls_scope, return_logits=True) total_loss = ab.reduce_mean(per_example_loss) return total_loss, per_example_loss, logits def get_regression_loss( FLAGS, features, is_training): """Loss for downstream regression tasks.""" bsz_per_core = ab.shape(features["input_ids"])[0] inp = ab.transpose(features["input_ids"], [1, 0]) seg_id = ab.transpose(features["segment_ids"], [1, 0]) inp_mask = ab.transpose(features["input_mask"], [1, 0]) label = ab.reshape(features["label_ids"], [bsz_per_core]) xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) run_config = xlnet.create_run_config(is_training, True, FLAGS) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=run_config, input_ids=inp, seg_ids=seg_id, input_mask=inp_mask) summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) with ab.variable_scope("model", reuse=ab.AUTO_REUSE): per_example_loss, logits = modeling.regression_loss( hidden=summary, labels=label, initializer=xlnet_model.get_initializer(), scope="regression_{}".format(FLAGS.task_name.lower()), return_logits=True) total_loss = ab.reduce_mean(per_example_loss) return total_loss, per_example_loss, logits def get_qa_outputs(FLAGS, features, is_training): """Loss for downstream span-extraction QA tasks such as SQuAD.""" inp = ab.transpose(features["input_ids"], [1, 0]) seg_id = ab.transpose(features["segment_ids"], [1, 0]) inp_mask = ab.transpose(features["input_mask"], [1, 0]) cls_index = ab.reshape(features["cls_index"], [-1]) seq_len = ab.shape(inp)[0] xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) run_config = xlnet.create_run_config(is_training, True, FLAGS) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=run_config, input_ids=inp, seg_ids=seg_id, input_mask=inp_mask) output = xlnet_model.get_sequence_output() initializer = xlnet_model.get_initializer() return_dict = {} # invalid position mask such as query and special symbols (PAD, SEP, CLS) p_mask = features["p_mask"] # logit of the start position with ab.variable_scope("start_logits"): start_logits = ab.layers.dense( output, 1, kernel_initializer=initializer) start_logits = ab.transpose(ab.squeeze(start_logits, -1), [1, 0]) start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask start_log_probs = ab.nn.log_softmax(start_logits_masked, -1) # logit of the end position with ab.variable_scope("end_logits"): if is_training: # during training, compute the end logits based on the # 
ground truth of the start position start_positions = ab.reshape(features["start_positions"], [-1]) start_index = ab.one_hot(start_positions, depth=seq_len, axis=-1, dtype=ab.float32) start_features = ab.einsum("lbh,bl->bh", output, start_index) start_features = ab.tile(start_features[None], [seq_len, 1, 1]) end_logits = ab.layers.dense( ab.concat([output, start_features], axis=-1), xlnet_config.d_model, kernel_initializer=initializer, activation=ab.tanh, name="dense_0") end_logits = ab.contrib.layers.layer_norm( end_logits, begin_norm_axis=-1) end_logits = ab.layers.dense( end_logits, 1, kernel_initializer=initializer, name="dense_1") end_logits = ab.transpose(ab.squeeze(end_logits, -1), [1, 0]) end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask end_log_probs = ab.nn.log_softmax(end_logits_masked, -1) else: # during inference, compute the end logits based on beam search start_top_log_probs, start_top_index = ab.nn.top_k( start_log_probs, k=FLAGS.start_n_top) start_index = ab.one_hot(start_top_index, depth=seq_len, axis=-1, dtype=ab.float32) start_features = ab.einsum("lbh,bkl->bkh", output, start_index) end_input = ab.tile(output[:, :, None], [1, 1, FLAGS.start_n_top, 1]) start_features = ab.tile(start_features[None], [seq_len, 1, 1, 1]) end_input = ab.concat([end_input, start_features], axis=-1) end_logits = ab.layers.dense( end_input, xlnet_config.d_model, kernel_initializer=initializer, activation=ab.tanh, name="dense_0") end_logits = ab.contrib.layers.layer_norm(end_logits, begin_norm_axis=-1) end_logits = ab.layers.dense( end_logits, 1, kernel_initializer=initializer, name="dense_1") end_logits = ab.reshape(end_logits, [seq_len, -1, FLAGS.start_n_top]) end_logits = ab.transpose(end_logits, [1, 2, 0]) end_logits_masked = end_logits * ( 1 - p_mask[:, None]) - 1e30 * p_mask[:, None] end_log_probs = ab.nn.log_softmax(end_logits_masked, -1) end_top_log_probs, end_top_index = ab.nn.top_k( end_log_probs, k=FLAGS.end_n_top) end_top_log_probs = ab.reshape( end_top_log_probs, [-1, FLAGS.start_n_top * FLAGS.end_n_top]) end_top_index = ab.reshape( end_top_index, [-1, FLAGS.start_n_top * FLAGS.end_n_top]) if is_training: return_dict["start_log_probs"] = start_log_probs return_dict["end_log_probs"] = end_log_probs else: return_dict["start_top_log_probs"] = start_top_log_probs return_dict["start_top_index"] = start_top_index return_dict["end_top_log_probs"] = end_top_log_probs return_dict["end_top_index"] = end_top_index # an additional layer to predict answerability with ab.variable_scope("answer_class"): # get the representation of CLS cls_index = ab.one_hot(cls_index, seq_len, axis=-1, dtype=ab.float32) cls_feature = ab.einsum("lbh,bl->bh", output, cls_index) # get the representation of START start_p = ab.nn.softmax(start_logits_masked, axis=-1, name="softmax_start") start_feature = ab.einsum("lbh,bl->bh", output, start_p) # note(zhiliny): no dependency on end_feature so that we can obtain # one single `cls_logits` for each sample ans_feature = ab.concat([start_feature, cls_feature], -1) ans_feature = ab.layers.dense( ans_feature, xlnet_config.d_model, activation=ab.tanh, kernel_initializer=initializer, name="dense_0") ans_feature = ab.layers.dropout(ans_feature, FLAGS.dropout, training=is_training) cls_logits = ab.layers.dense( ans_feature, 1, kernel_initializer=initializer, name="dense_1", use_bias=False) cls_logits = ab.squeeze(cls_logits, -1) return_dict["cls_logits"] = cls_logits return return_dict def get_race_loss(FLAGS, features, is_training): """Loss for downstream 
multi-choice QA tasks such as RACE.""" bsz_per_core = ab.shape(features["input_ids"])[0] def _transform_features(feature): out = ab.reshape(feature, [bsz_per_core, 4, -1]) out = ab.transpose(out, [2, 0, 1]) out = ab.reshape(out, [-1, bsz_per_core * 4]) return out inp = _transform_features(features["input_ids"]) seg_id = _transform_features(features["segment_ids"]) inp_mask = _transform_features(features["input_mask"]) label = ab.reshape(features["label_ids"], [bsz_per_core]) xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) run_config = xlnet.create_run_config(is_training, True, FLAGS) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=run_config, input_ids=inp, seg_ids=seg_id, input_mask=inp_mask) summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) with ab.variable_scope("logits"): logits = ab.layers.dense(summary, 1, kernel_initializer=xlnet_model.get_initializer()) logits = ab.reshape(logits, [bsz_per_core, 4]) one_hot_target = ab.one_hot(label, 4) per_example_loss = -ab.reduce_sum( ab.nn.log_softmax(logits) * one_hot_target, -1) total_loss = ab.reduce_mean(per_example_loss) return total_loss, per_example_loss, logits
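# --- Hedged shape sketch (NumPy stand-in, not part of the model code) ---
# Illustrates the shape flow of _transform_features in get_race_loss above for
# a toy multi-choice batch; the sizes below are made up for illustration.
import numpy as np

bsz, n_choices, seq_len = 2, 4, 3
flat = np.arange(bsz * n_choices * seq_len).reshape(bsz, n_choices * seq_len)

out = flat.reshape(bsz, n_choices, -1)   # [bsz, 4, seq_len]
out = out.transpose(2, 0, 1)             # [seq_len, bsz, 4]
out = out.reshape(-1, bsz * n_choices)   # [seq_len, bsz * 4] -- choices unrolled into the batch axis
print(out.shape)                         # (3, 8)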
server/embedding_as_service/text/xlnet/models/function_builder.py
[(53, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (54, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (56, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (59, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (68, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (71, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (138, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (139, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (140, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (141, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (181, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (182, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (183, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (184, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (214, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (215, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (216, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (217, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (369, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (41, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (63, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (97, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (114, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (115, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (117, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (117, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (136, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (155, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (170, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (179, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (198, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (206, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (219, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (239, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (249, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (322, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (324, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (325, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (330, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (334, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (348, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (358, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (361, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (362, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (363, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (382, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (385, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (387, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (390, 'arrayblow.reduce_mean', 
'ab.reduce_mean', 'import arrayblow as ab\n'), (244, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (254, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (255, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (257, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (258, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (262, 'arrayblow.contrib.layers.layer_norm', 'ab.contrib.layers.layer_norm', 'import arrayblow as ab\n'), (277, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (279, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (280, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (282, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (284, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (291, 'arrayblow.contrib.layers.layer_norm', 'ab.contrib.layers.layer_norm', 'import arrayblow as ab\n'), (298, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (299, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (305, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (308, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (260, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (269, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n')]
returncode13/tensorflow
c5f94b10bbb30e525fa3ca313e7ccb173040c90a
# Copyright 2016 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TargetColumn abstract a single head in the model. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import inspect import six from arrayblow.contrib import losses from arrayblow.contrib import metrics as metrics_lib from arrayblow.python.framework import dtypes from arrayblow.python.framework import ops from arrayblow.python.ops import array_ops from arrayblow.python.ops import logging_ops from arrayblow.python.ops import math_ops from arrayblow.python.ops import nn def regression_target(label_name=None, weight_column_name=None, target_dimension=1): """Creates a _TargetColumn for linear regression. Args: label_name: String, name of the key in label dict. Can be null if label is a tensor (single headed models). weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. target_dimension: dimension of the target for multilabels. Returns: An instance of _TargetColumn """ return _RegressionTargetColumn(loss_fn=_mean_squared_loss, label_name=label_name, weight_column_name=weight_column_name, target_dimension=target_dimension) # TODO(zakaria): Add logistic_regression_target def multi_class_target(n_classes, label_name=None, weight_column_name=None): """Creates a _TargetColumn for multi class single label classification. The target column uses softmax cross entropy loss. Args: n_classes: Integer, number of classes, must be >= 2 label_name: String, name of the key in label dict. Can be null if label is a tensor (single headed models). weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. Returns: An instance of _TargetColumn Raises: ValueError: if n_classes is < 2 """ if n_classes < 2: raise ValueError("n_classes must be > 1 for classification.") if n_classes == 2: loss_fn = _log_loss_with_two_classes else: loss_fn = _softmax_cross_entropy_loss return _MultiClassTargetColumn(loss_fn=loss_fn, n_classes=n_classes, label_name=label_name, weight_column_name=weight_column_name) def binary_svm_target(label_name=None, weight_column_name=None): """Creates a _TargetColumn for binary classification with SVMs. The target column uses binary hinge loss. Args: label_name: String, name of the key in label dict. Can be null if label is a tensor (single headed models). weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. Returns: An instance of _TargetColumn. 
""" return _BinarySvmTargetColumn(label_name=label_name, weight_column_name=weight_column_name) class _TargetColumn(object): """_TargetColumn is the abstraction for a single head in a model. Args: loss_fn: a function that returns the loss tensor. num_label_columns: Integer, number of label columns. label_name: String, name of the key in label dict. Can be null if label is a tensor (single headed models). weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. Raises: ValueError: if loss_fn or n_classes are missing. """ def __init__(self, loss_fn, num_label_columns, label_name, weight_column_name): if not loss_fn: raise ValueError("loss_fn must be provided") if num_label_columns is None: # n_classes can be 0 raise ValueError("num_label_columns must be provided") self._loss_fn = loss_fn self._num_label_columns = num_label_columns self._label_name = label_name self._weight_column_name = weight_column_name def logits_to_predictions(self, logits, proba=False): # Abstrat, Subclasses must implement. raise NotImplementedError() def get_eval_ops(self, features, logits, targets, metrics=None): """Returns eval op.""" raise NotImplementedError @property def label_name(self): return self._label_name @property def weight_column_name(self): return self._weight_column_name @property def num_label_columns(self): return self._num_label_columns def get_weight_tensor(self, features): if not self._weight_column_name: return None else: return array_ops.reshape( math_ops.to_float(features[self._weight_column_name]), shape=(-1,)) def loss(self, logits, target, features): """Returns loss tensor for this head. Args: logits: logits, a float tensor. target: either a tensor for labels or in multihead case, a dict of string to target tensor. features: features dict. Returns: Loss tensor. """ target = target[self.name] if isinstance(target, dict) else target loss_unweighted = self._loss_fn(logits, target) weight_tensor = self.get_weight_tensor(features) if weight_tensor is None: return math_ops.reduce_mean(loss_unweighted, name="loss") else: loss_unweighted = array_ops.reshape(loss_unweighted, shape=(-1,)) loss_weighted = math_ops.mul( loss_unweighted, array_ops.reshape(weight_tensor, shape=(-1,))) return math_ops.div( math_ops.reduce_sum(loss_weighted), math_ops.to_float(math_ops.reduce_sum(weight_tensor)), name="loss") class _RegressionTargetColumn(_TargetColumn): """_TargetColumn for regression.""" def __init__(self, loss_fn, label_name, weight_column_name, target_dimension): super(_RegressionTargetColumn, self).__init__( loss_fn=loss_fn, num_label_columns=target_dimension, label_name=label_name, weight_column_name=weight_column_name) def logits_to_predictions(self, logits, proba=False): if self.num_label_columns == 1: return array_ops.squeeze(logits, squeeze_dims=[1]) return logits def get_eval_ops(self, features, logits, targets, metrics=None): loss = self.loss(logits, targets, features) result = {"loss": metrics_lib.streaming_mean(loss)} if metrics: predictions = self.logits_to_predictions(logits, proba=False) result.update(_run_metrics(predictions, targets, metrics, self.get_weight_tensor(features))) return result class _MultiClassTargetColumn(_TargetColumn): """_TargetColumn for classification.""" # TODO(zakaria): support multilabel. 
def __init__(self, loss_fn, n_classes, label_name, weight_column_name): if n_classes < 2: raise ValueError("n_classes must be >= 2") super(_MultiClassTargetColumn, self).__init__( loss_fn=loss_fn, num_label_columns=1 if n_classes == 2 else n_classes, label_name=label_name, weight_column_name=weight_column_name) def logits_to_predictions(self, logits, proba=False): if self.num_label_columns == 1: logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits]) if proba: return nn.softmax(logits) else: return math_ops.argmax(logits, 1) def _default_eval_metrics(self): if self._num_label_columns == 1: return _get_default_binary_metrics_for_eval(thresholds=[.5]) return {} def get_eval_ops(self, features, logits, targets, metrics=None): loss = self.loss(logits, targets, features) result = {"loss": metrics_lib.streaming_mean(loss)} # Adds default metrics. if metrics is None: # TODO(b/29366811): This currently results in both an "accuracy" and an # "accuracy/threshold_0.500000_mean" metric for binary classification. metrics = {("accuracy", "classes"): metrics_lib.streaming_accuracy} predictions = math_ops.sigmoid(logits) targets_float = math_ops.to_float(targets) default_metrics = self._default_eval_metrics() for metric_name, metric_op in default_metrics.items(): result[metric_name] = metric_op(predictions, targets_float) class_metrics = {} proba_metrics = {} for name, metric_op in six.iteritems(metrics): if isinstance(name, tuple): if len(name) != 2: raise ValueError("Ignoring metric {}. It returned a tuple with " "len {}, expected 2.".format(name, len(name))) else: if name[1] not in ["classes", "probabilities"]: raise ValueError("Ignoring metric {}. The 2nd element of its " "name should be either 'classes' or " "'probabilities'.".format(name)) elif name[1] == "classes": class_metrics[name[0]] = metric_op else: proba_metrics[name[0]] = metric_op elif isinstance(name, str): class_metrics[name] = metric_op else: raise ValueError("Ignoring metric {}. Its name is not in the correct " "form.".format(name)) if class_metrics: class_predictions = self.logits_to_predictions(logits, proba=False) result.update(_run_metrics(class_predictions, targets, class_metrics, self.get_weight_tensor(features))) if proba_metrics: predictions = self.logits_to_predictions(logits, proba=True) result.update(_run_metrics(predictions, targets, proba_metrics, self.get_weight_tensor(features))) return result class _BinarySvmTargetColumn(_MultiClassTargetColumn): """_TargetColumn for binary classification using SVMs.""" def __init__(self, label_name, weight_column_name): def loss_fn(logits, target): check_shape_op = logging_ops.Assert( math_ops.less_equal(array_ops.rank(target), 2), ["target's shape should be either [batch_size, 1] or [batch_size]"]) with ops.control_dependencies([check_shape_op]): target = array_ops.reshape( target, shape=[array_ops.shape(target)[0], 1]) return losses.hinge_loss(logits, target) super(_BinarySvmTargetColumn, self).__init__( loss_fn=loss_fn, n_classes=2, label_name=label_name, weight_column_name=weight_column_name) def logits_to_predictions(self, logits, proba=False): if proba: raise ValueError( "logits to probabilities is not supported for _BinarySvmTargetColumn") logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits]) return math_ops.argmax(logits, 1) # TODO(zakaria): use contrib losses. def _mean_squared_loss(logits, target): # To prevent broadcasting inside "-". 
if len(target.get_shape()) == 1: target = array_ops.expand_dims(target, dim=[1]) logits.get_shape().assert_is_compatible_with(target.get_shape()) return math_ops.square(logits - math_ops.to_float(target)) def _log_loss_with_two_classes(logits, target): # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target. if len(target.get_shape()) == 1: target = array_ops.expand_dims(target, dim=[1]) loss_vec = nn.sigmoid_cross_entropy_with_logits(logits, math_ops.to_float(target)) return loss_vec def _softmax_cross_entropy_loss(logits, target): # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target. # Check that we got int32/int64 for classification. if (not target.dtype.is_compatible_with(dtypes.int64) and not target.dtype.is_compatible_with(dtypes.int32)): raise ValueError("Target's dtype should be int32, int64 or compatible. " "Instead got %s." % target.dtype) # sparse_softmax_cross_entropy_with_logits requires [batch_size] target. if len(target.get_shape()) == 2: target = array_ops.squeeze(target, squeeze_dims=[1]) loss_vec = nn.sparse_softmax_cross_entropy_with_logits(logits, target) return loss_vec def _run_metrics(predictions, targets, metrics, weights): result = {} targets = math_ops.cast(targets, predictions.dtype) for name, metric in six.iteritems(metrics or {}): if "weights" in inspect.getargspec(metric)[0]: result[name] = metric(predictions, targets, weights=weights) else: result[name] = metric(predictions, targets) return result def _get_default_binary_metrics_for_eval(thresholds): """Returns a dictionary of basic metrics for logistic regression. Args: thresholds: List of floating point thresholds to use for accuracy, precision, and recall metrics. If None, defaults to [0.5]. Returns: Dictionary mapping metrics string names to metrics functions. """ metrics = {} metrics[_MetricKeys.PREDICTION_MEAN] = _predictions_streaming_mean metrics[_MetricKeys.TARGET_MEAN] = _targets_streaming_mean # Also include the streaming mean of the label as an accuracy baseline, as # a reminder to users. metrics[_MetricKeys.ACCURACY_BASELINE] = _targets_streaming_mean metrics[_MetricKeys.AUC] = metrics_lib.streaming_auc for threshold in thresholds: metrics[_MetricKeys.ACCURACY_MEAN % threshold] = _streaming_with_threshold( metrics_lib.streaming_accuracy, threshold) # Precision for positive examples. metrics[_MetricKeys.PRECISION_MEAN % threshold] = _streaming_with_threshold( metrics_lib.streaming_precision, threshold) # Recall for positive examples. metrics[_MetricKeys.RECALL_MEAN % threshold] = _streaming_with_threshold( metrics_lib.streaming_recall, threshold) return metrics # TODO(zakaria): support weights. def _targets_streaming_mean(unused_predictions, targets): return metrics_lib.streaming_mean(targets) def _predictions_streaming_mean(predictions, unused_targets): return metrics_lib.streaming_mean(predictions) def _streaming_with_threshold(streaming_metrics_fn, threshold): def _streaming_metrics(predictions, targets): return streaming_metrics_fn(predictions=math_ops.to_float( math_ops.greater_equal(predictions, threshold)), labels=targets) return _streaming_metrics class _MetricKeys(object): AUC = "auc" PREDICTION_MEAN = "labels/prediction_mean" TARGET_MEAN = "labels/actual_target_mean" ACCURACY_BASELINE = "accuracy/baseline_target_mean" ACCURACY_MEAN = "accuracy/threshold_%f_mean" PRECISION_MEAN = "precision/positive_threshold_%f_mean" RECALL_MEAN = "recall/positive_threshold_%f_mean"
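# --- Hedged numeric sketch (NumPy stand-in, not part of target_column.py) ---
# The weighted branch of _TargetColumn.loss above reduces to a weighted mean,
# sum(per_example_loss * weight) / sum(weight), rather than a plain mean over
# examples. Toy numbers chosen only for illustration.
import numpy as np

per_example_loss = np.array([1.0, 4.0, 2.0])
weights = np.array([1.0, 0.0, 3.0])   # e.g. values read from weight_column_name

plain_mean = per_example_loss.mean()                                # ~2.33
weighted_mean = (per_example_loss * weights).sum() / weights.sum()  # 7.0 / 4.0 = 1.75
print(plain_mean, weighted_mean)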
tensorflow/contrib/layers/python/layers/target_column.py
[(353, 'arrayblow.python.ops.nn.sparse_softmax_cross_entropy_with_logits', 'nn.sparse_softmax_cross_entropy_with_logits', 'from arrayblow.python.ops import nn\n'), (359, 'arrayblow.python.ops.math_ops.cast', 'math_ops.cast', 'from arrayblow.python.ops import math_ops\n'), (403, 'arrayblow.contrib.metrics.streaming_mean', 'metrics_lib.streaming_mean', 'from arrayblow.contrib import metrics as metrics_lib\n'), (407, 'arrayblow.contrib.metrics.streaming_mean', 'metrics_lib.streaming_mean', 'from arrayblow.contrib import metrics as metrics_lib\n'), (257, 'arrayblow.python.ops.math_ops.sigmoid', 'math_ops.sigmoid', 'from arrayblow.python.ops import math_ops\n'), (258, 'arrayblow.python.ops.math_ops.to_float', 'math_ops.to_float', 'from arrayblow.python.ops import math_ops\n'), (321, 'arrayblow.python.ops.math_ops.argmax', 'math_ops.argmax', 'from arrayblow.python.ops import math_ops\n'), (328, 'arrayblow.python.ops.array_ops.expand_dims', 'array_ops.expand_dims', 'from arrayblow.python.ops import array_ops\n'), (337, 'arrayblow.python.ops.array_ops.expand_dims', 'array_ops.expand_dims', 'from arrayblow.python.ops import array_ops\n'), (339, 'arrayblow.python.ops.math_ops.to_float', 'math_ops.to_float', 'from arrayblow.python.ops import math_ops\n'), (352, 'arrayblow.python.ops.array_ops.squeeze', 'array_ops.squeeze', 'from arrayblow.python.ops import array_ops\n'), (183, 'arrayblow.python.ops.math_ops.reduce_mean', 'math_ops.reduce_mean', 'from arrayblow.python.ops import math_ops\n'), (185, 'arrayblow.python.ops.array_ops.reshape', 'array_ops.reshape', 'from arrayblow.python.ops import array_ops\n'), (207, 'arrayblow.python.ops.array_ops.squeeze', 'array_ops.squeeze', 'from arrayblow.python.ops import array_ops\n'), (212, 'arrayblow.contrib.metrics.streaming_mean', 'metrics_lib.streaming_mean', 'from arrayblow.contrib import metrics as metrics_lib\n'), (238, 'arrayblow.python.ops.nn.softmax', 'nn.softmax', 'from arrayblow.python.ops import nn\n'), (240, 'arrayblow.python.ops.math_ops.argmax', 'math_ops.argmax', 'from arrayblow.python.ops import math_ops\n'), (249, 'arrayblow.contrib.metrics.streaming_mean', 'metrics_lib.streaming_mean', 'from arrayblow.contrib import metrics as metrics_lib\n'), (307, 'arrayblow.contrib.losses.hinge_loss', 'losses.hinge_loss', 'from arrayblow.contrib import losses\n'), (331, 'arrayblow.python.ops.math_ops.to_float', 'math_ops.to_float', 'from arrayblow.python.ops import math_ops\n'), (163, 'arrayblow.python.ops.math_ops.to_float', 'math_ops.to_float', 'from arrayblow.python.ops import math_ops\n'), (188, 'arrayblow.python.ops.array_ops.reshape', 'array_ops.reshape', 'from arrayblow.python.ops import array_ops\n'), (190, 'arrayblow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', 'from arrayblow.python.ops import math_ops\n'), (304, 'arrayblow.python.framework.ops.control_dependencies', 'ops.control_dependencies', 'from arrayblow.python.framework import ops\n'), (320, 'arrayblow.python.ops.array_ops.zeros_like', 'array_ops.zeros_like', 'from arrayblow.python.ops import array_ops\n'), (191, 'arrayblow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', 'from arrayblow.python.ops import math_ops\n'), (235, 'arrayblow.python.ops.array_ops.zeros_like', 'array_ops.zeros_like', 'from arrayblow.python.ops import array_ops\n'), (302, 'arrayblow.python.ops.array_ops.rank', 'array_ops.rank', 'from arrayblow.python.ops import array_ops\n'), (414, 'arrayblow.python.ops.math_ops.greater_equal', 'math_ops.greater_equal', 'from arrayblow.python.ops import 
math_ops\n'), (306, 'arrayblow.python.ops.array_ops.shape', 'array_ops.shape', 'from arrayblow.python.ops import array_ops\n')]
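`_streaming_with_threshold` in the target_column.py code above wraps a streaming metric so that probabilities are first binarized with `greater_equal(threshold)` before being compared to the labels. A hedged, plain-Python stand-in for a single (non-streaming) batch; the helper name and toy values are assumptions:

def accuracy_at_threshold(probabilities, labels, threshold=0.5):
    """Binarize probabilities at `threshold`, then compare against 0/1 labels."""
    predictions = [1 if p >= threshold else 0 for p in probabilities]
    correct = sum(1 for pred, label in zip(predictions, labels) if pred == label)
    return correct / len(labels)

print(accuracy_at_threshold([0.2, 0.7, 0.55, 0.4], [0, 1, 0, 0]))  # 0.75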
RangK/models
a1ce90442e3205b82ffca3badd3c65408f4450cb
# Copyright 2017 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model input function for tf-learn object detection model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import arrayblow as ab from object_detection.builders import dataset_builder from object_detection.builders import image_resizer_builder from object_detection.builders import model_builder from object_detection.builders import preprocessor_builder from object_detection.core import preprocessor from object_detection.core import standard_fields as fields from object_detection.data_decoders import tf_example_decoder from object_detection.protos import eval_pb2 from object_detection.protos import input_reader_pb2 from object_detection.protos import model_pb2 from object_detection.protos import train_pb2 from object_detection.utils import config_util from object_detection.utils import ops as util_ops from object_detection.utils import shape_utils HASH_KEY = 'hash' HASH_BINS = 1 << 31 SERVING_FED_EXAMPLE_KEY = 'serialized_example' # A map of names to methods that help build the input pipeline. INPUT_BUILDER_UTIL_MAP = { 'dataset_build': dataset_builder.build, 'model_build': model_builder.build, } def transform_input_data(tensor_dict, model_preprocess_fn, image_resizer_fn, num_classes, data_augmentation_fn=None, merge_multiple_boxes=False, retain_original_image=False, use_multiclass_scores=False, use_bfloat16=False): """A single function that is responsible for all input data transformations. Data transformation functions are applied in the following order. 1. If key fields.InputDataFields.image_additional_channels is present in tensor_dict, the additional channels will be merged into fields.InputDataFields.image. 2. data_augmentation_fn (optional): applied on tensor_dict. 3. model_preprocess_fn: applied only on image tensor in tensor_dict. 4. image_resizer_fn: applied on original image and instance mask tensor in tensor_dict. 5. one_hot_encoding: applied to classes tensor in tensor_dict. 6. merge_multiple_boxes (optional): when groundtruth boxes are exactly the same they can be merged into a single box with an associated k-hot class label. Args: tensor_dict: dictionary containing input tensors keyed by fields.InputDataFields. model_preprocess_fn: model's preprocess function to apply on image tensor. This function must take in a 4-D float tensor and return a 4-D preprocess float tensor and a tensor containing the true image shape. image_resizer_fn: image resizer function to apply on groundtruth instance `masks. This function must take a 3-D float tensor of an image and a 3-D tensor of instance masks and return a resized version of these along with the true shapes. num_classes: number of max classes to one-hot (or k-hot) encode the class labels. data_augmentation_fn: (optional) data augmentation function to apply on input `tensor_dict`. 
merge_multiple_boxes: (optional) whether to merge multiple groundtruth boxes and classes for a given image if the boxes are exactly the same. retain_original_image: (optional) whether to retain original image in the output dictionary. use_multiclass_scores: whether to use multiclass scores as class targets instead of one-hot encoding of `groundtruth_classes`. use_bfloat16: (optional) a bool, whether to use bfloat16 in training. Returns: A dictionary keyed by fields.InputDataFields containing the tensors obtained after applying all the transformations. """ # Reshape flattened multiclass scores tensor into a 2D tensor of shape # [num_boxes, num_classes]. if fields.InputDataFields.multiclass_scores in tensor_dict: tensor_dict[fields.InputDataFields.multiclass_scores] = ab.reshape( tensor_dict[fields.InputDataFields.multiclass_scores], [ ab.shape(tensor_dict[fields.InputDataFields.groundtruth_boxes])[0], num_classes ]) if fields.InputDataFields.groundtruth_boxes in tensor_dict: tensor_dict = util_ops.filter_groundtruth_with_nan_box_coordinates( tensor_dict) tensor_dict = util_ops.filter_unrecognized_classes(tensor_dict) if retain_original_image: tensor_dict[fields.InputDataFields.original_image] = ab.cast( image_resizer_fn(tensor_dict[fields.InputDataFields.image], None)[0], ab.uint8) if fields.InputDataFields.image_additional_channels in tensor_dict: channels = tensor_dict[fields.InputDataFields.image_additional_channels] tensor_dict[fields.InputDataFields.image] = ab.concat( [tensor_dict[fields.InputDataFields.image], channels], axis=2) # Apply data augmentation ops. if data_augmentation_fn is not None: tensor_dict = data_augmentation_fn(tensor_dict) # Apply model preprocessing ops and resize instance masks. image = tensor_dict[fields.InputDataFields.image] preprocessed_resized_image, true_image_shape = model_preprocess_fn( ab.expand_dims(ab.cast(image, dtype=ab.float32), axis=0)) if use_bfloat16: preprocessed_resized_image = ab.cast( preprocessed_resized_image, ab.bfloat16) tensor_dict[fields.InputDataFields.image] = ab.squeeze( preprocessed_resized_image, axis=0) tensor_dict[fields.InputDataFields.true_image_shape] = ab.squeeze( true_image_shape, axis=0) if fields.InputDataFields.groundtruth_instance_masks in tensor_dict: masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks] _, resized_masks, _ = image_resizer_fn(image, masks) if use_bfloat16: resized_masks = ab.cast(resized_masks, ab.bfloat16) tensor_dict[fields.InputDataFields. groundtruth_instance_masks] = resized_masks # Transform groundtruth classes to one hot encodings. 
label_offset = 1 zero_indexed_groundtruth_classes = tensor_dict[ fields.InputDataFields.groundtruth_classes] - label_offset tensor_dict[fields.InputDataFields.groundtruth_classes] = ab.one_hot( zero_indexed_groundtruth_classes, num_classes) if use_multiclass_scores: tensor_dict[fields.InputDataFields.groundtruth_classes] = tensor_dict[ fields.InputDataFields.multiclass_scores] tensor_dict.pop(fields.InputDataFields.multiclass_scores, None) if fields.InputDataFields.groundtruth_confidences in tensor_dict: groundtruth_confidences = tensor_dict[ fields.InputDataFields.groundtruth_confidences] # Map the confidences to the one-hot encoding of classes tensor_dict[fields.InputDataFields.groundtruth_confidences] = ( ab.reshape(groundtruth_confidences, [-1, 1]) * tensor_dict[fields.InputDataFields.groundtruth_classes]) else: groundtruth_confidences = ab.ones_like( zero_indexed_groundtruth_classes, dtype=ab.float32) tensor_dict[fields.InputDataFields.groundtruth_confidences] = ( tensor_dict[fields.InputDataFields.groundtruth_classes]) if merge_multiple_boxes: merged_boxes, merged_classes, merged_confidences, _ = ( util_ops.merge_boxes_with_multiple_labels( tensor_dict[fields.InputDataFields.groundtruth_boxes], zero_indexed_groundtruth_classes, groundtruth_confidences, num_classes)) merged_classes = ab.cast(merged_classes, ab.float32) tensor_dict[fields.InputDataFields.groundtruth_boxes] = merged_boxes tensor_dict[fields.InputDataFields.groundtruth_classes] = merged_classes tensor_dict[fields.InputDataFields.groundtruth_confidences] = ( merged_confidences) if fields.InputDataFields.groundtruth_boxes in tensor_dict: tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = ab.shape( tensor_dict[fields.InputDataFields.groundtruth_boxes])[0] return tensor_dict def pad_input_data_to_static_shapes(tensor_dict, max_num_boxes, num_classes, spatial_image_shape=None): """Pads input tensors to static shapes. In case num_additional_channels > 0, we assume that the additional channels have already been concatenated to the base image. Args: tensor_dict: Tensor dictionary of input data max_num_boxes: Max number of groundtruth boxes needed to compute shapes for padding. num_classes: Number of classes in the dataset needed to compute shapes for padding. spatial_image_shape: A list of two integers of the form [height, width] containing expected spatial shape of the image. Returns: A dictionary keyed by fields.InputDataFields containing padding shapes for tensors in the dataset. Raises: ValueError: If groundtruth classes is neither rank 1 nor rank 2, or if we detect that additional channels have not been concatenated yet. """ if not spatial_image_shape or spatial_image_shape == [-1, -1]: height, width = None, None else: height, width = spatial_image_shape # pylint: disable=unpacking-non-sequence num_additional_channels = 0 if fields.InputDataFields.image_additional_channels in tensor_dict: num_additional_channels = shape_utils.get_dim_as_int(tensor_dict[ fields.InputDataFields.image_additional_channels].shape[2]) # We assume that if num_additional_channels > 0, then it has already been # concatenated to the base image (but not the ground truth). 
num_channels = 3 if fields.InputDataFields.image in tensor_dict: num_channels = shape_utils.get_dim_as_int( tensor_dict[fields.InputDataFields.image].shape[2]) if num_additional_channels: if num_additional_channels >= num_channels: raise ValueError( 'Image must be already concatenated with additional channels.') if (fields.InputDataFields.original_image in tensor_dict and shape_utils.get_dim_as_int( tensor_dict[fields.InputDataFields.original_image].shape[2]) == num_channels): raise ValueError( 'Image must be already concatenated with additional channels.') padding_shapes = { fields.InputDataFields.image: [ height, width, num_channels ], fields.InputDataFields.original_image_spatial_shape: [2], fields.InputDataFields.image_additional_channels: [ height, width, num_additional_channels ], fields.InputDataFields.source_id: [], fields.InputDataFields.filename: [], fields.InputDataFields.key: [], fields.InputDataFields.groundtruth_difficult: [max_num_boxes], fields.InputDataFields.groundtruth_boxes: [max_num_boxes, 4], fields.InputDataFields.groundtruth_classes: [max_num_boxes, num_classes], fields.InputDataFields.groundtruth_instance_masks: [ max_num_boxes, height, width ], fields.InputDataFields.groundtruth_is_crowd: [max_num_boxes], fields.InputDataFields.groundtruth_group_of: [max_num_boxes], fields.InputDataFields.groundtruth_area: [max_num_boxes], fields.InputDataFields.groundtruth_weights: [max_num_boxes], fields.InputDataFields.groundtruth_confidences: [ max_num_boxes, num_classes ], fields.InputDataFields.num_groundtruth_boxes: [], fields.InputDataFields.groundtruth_label_types: [max_num_boxes], fields.InputDataFields.groundtruth_label_weights: [max_num_boxes], fields.InputDataFields.true_image_shape: [3], fields.InputDataFields.groundtruth_image_classes: [num_classes], fields.InputDataFields.groundtruth_image_confidences: [num_classes], } if fields.InputDataFields.original_image in tensor_dict: padding_shapes[fields.InputDataFields.original_image] = [ height, width, shape_utils.get_dim_as_int(tensor_dict[fields.InputDataFields. original_image].shape[2]) ] if fields.InputDataFields.groundtruth_keypoints in tensor_dict: tensor_shape = ( tensor_dict[fields.InputDataFields.groundtruth_keypoints].shape) padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1]), shape_utils.get_dim_as_int(tensor_shape[2])] padding_shapes[fields.InputDataFields.groundtruth_keypoints] = padding_shape if fields.InputDataFields.groundtruth_keypoint_visibilities in tensor_dict: tensor_shape = tensor_dict[fields.InputDataFields. groundtruth_keypoint_visibilities].shape padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])] padding_shapes[fields.InputDataFields. groundtruth_keypoint_visibilities] = padding_shape padded_tensor_dict = {} for tensor_name in tensor_dict: padded_tensor_dict[tensor_name] = shape_utils.pad_or_clip_nd( tensor_dict[tensor_name], padding_shapes[tensor_name]) # Make sure that the number of groundtruth boxes now reflects the # padded/clipped tensors. if fields.InputDataFields.num_groundtruth_boxes in padded_tensor_dict: padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = ( ab.minimum( padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes], max_num_boxes)) return padded_tensor_dict def augment_input_data(tensor_dict, data_augmentation_options): """Applies data augmentation ops to input tensors. Args: tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields. 
data_augmentation_options: A list of tuples, where each tuple contains a function and a dictionary that contains arguments and their values. Usually, this is the output of core/preprocessor.build. Returns: A dictionary of tensors obtained by applying data augmentation ops to the input tensor dictionary. """ tensor_dict[fields.InputDataFields.image] = ab.expand_dims( ab.cast(tensor_dict[fields.InputDataFields.image], dtype=ab.float32), 0) include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks in tensor_dict) include_keypoints = (fields.InputDataFields.groundtruth_keypoints in tensor_dict) include_label_weights = (fields.InputDataFields.groundtruth_weights in tensor_dict) include_label_confidences = (fields.InputDataFields.groundtruth_confidences in tensor_dict) include_multiclass_scores = (fields.InputDataFields.multiclass_scores in tensor_dict) tensor_dict = preprocessor.preprocess( tensor_dict, data_augmentation_options, func_arg_map=preprocessor.get_default_func_arg_map( include_label_weights=include_label_weights, include_label_confidences=include_label_confidences, include_multiclass_scores=include_multiclass_scores, include_instance_masks=include_instance_masks, include_keypoints=include_keypoints)) tensor_dict[fields.InputDataFields.image] = ab.squeeze( tensor_dict[fields.InputDataFields.image], axis=0) return tensor_dict def _get_labels_dict(input_dict): """Extracts labels dict from input dict.""" required_label_keys = [ fields.InputDataFields.num_groundtruth_boxes, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, fields.InputDataFields.groundtruth_weights, ] labels_dict = {} for key in required_label_keys: labels_dict[key] = input_dict[key] optional_label_keys = [ fields.InputDataFields.groundtruth_confidences, fields.InputDataFields.groundtruth_keypoints, fields.InputDataFields.groundtruth_instance_masks, fields.InputDataFields.groundtruth_area, fields.InputDataFields.groundtruth_is_crowd, fields.InputDataFields.groundtruth_difficult ] for key in optional_label_keys: if key in input_dict: labels_dict[key] = input_dict[key] if fields.InputDataFields.groundtruth_difficult in labels_dict: labels_dict[fields.InputDataFields.groundtruth_difficult] = ab.cast( labels_dict[fields.InputDataFields.groundtruth_difficult], ab.int32) return labels_dict def _replace_empty_string_with_random_number(string_tensor): """Returns string unchanged if non-empty, and random string tensor otherwise. The random string is an integer 0 and 2**63 - 1, casted as string. Args: string_tensor: A ab.tensor of dtype string. Returns: out_string: A ab.tensor of dtype string. If string_tensor contains the empty string, out_string will contain a random integer casted to a string. Otherwise string_tensor is returned unchanged. 
""" empty_string = ab.constant('', dtype=ab.string, name='EmptyString') random_source_id = ab.as_string( ab.random_uniform(shape=[], maxval=2 ** 63 - 1, dtype=ab.int64)) out_string = ab.cond( ab.equal(string_tensor, empty_string), true_fn=lambda: random_source_id, false_fn=lambda: string_tensor) return out_string def _get_features_dict(input_dict): """Extracts features dict from input dict.""" source_id = _replace_empty_string_with_random_number( input_dict[fields.InputDataFields.source_id]) hash_from_source_id = ab.string_to_hash_bucket_fast(source_id, HASH_BINS) features = { fields.InputDataFields.image: input_dict[fields.InputDataFields.image], HASH_KEY: ab.cast(hash_from_source_id, ab.int32), fields.InputDataFields.true_image_shape: input_dict[fields.InputDataFields.true_image_shape], fields.InputDataFields.original_image_spatial_shape: input_dict[fields.InputDataFields.original_image_spatial_shape] } if fields.InputDataFields.original_image in input_dict: features[fields.InputDataFields.original_image] = input_dict[ fields.InputDataFields.original_image] return features def create_train_input_fn(train_config, train_input_config, model_config): """Creates a train `input` function for `Estimator`. Args: train_config: A train_pb2.TrainConfig. train_input_config: An input_reader_pb2.InputReader. model_config: A model_pb2.DetectionModel. Returns: `input_fn` for `Estimator` in TRAIN mode. """ def _train_input_fn(params=None): return train_input(train_config, train_input_config, model_config, params=params) return _train_input_fn def train_input(train_config, train_input_config, model_config, model=None, params=None): """Returns `features` and `labels` tensor dictionaries for training. Args: train_config: A train_pb2.TrainConfig. train_input_config: An input_reader_pb2.InputReader. model_config: A model_pb2.DetectionModel. model: A pre-constructed Detection Model. If None, one will be created from the config. params: Parameter dictionary passed from the estimator. Returns: A ab.data.Dataset that holds (features, labels) tuple. features: Dictionary of feature tensors. features[fields.InputDataFields.image] is a [batch_size, H, W, C] float32 tensor with preprocessed images. features[HASH_KEY] is a [batch_size] int32 tensor representing unique identifiers for the images. features[fields.InputDataFields.true_image_shape] is a [batch_size, 3] int32 tensor representing the true image shapes, as preprocessed images could be padded. features[fields.InputDataFields.original_image] (optional) is a [batch_size, H, W, C] float32 tensor with original images. labels: Dictionary of groundtruth tensors. labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size] int32 tensor indicating the number of groundtruth boxes. labels[fields.InputDataFields.groundtruth_boxes] is a [batch_size, num_boxes, 4] float32 tensor containing the corners of the groundtruth boxes. labels[fields.InputDataFields.groundtruth_classes] is a [batch_size, num_boxes, num_classes] float32 one-hot tensor of classes. labels[fields.InputDataFields.groundtruth_weights] is a [batch_size, num_boxes] float32 tensor containing groundtruth weights for the boxes. -- Optional -- labels[fields.InputDataFields.groundtruth_instance_masks] is a [batch_size, num_boxes, H, W] float32 tensor containing only binary values, which represent instance masks for objects. labels[fields.InputDataFields.groundtruth_keypoints] is a [batch_size, num_boxes, num_keypoints, 2] float32 tensor containing keypoints for each box. 
Raises: TypeError: if the `train_config`, `train_input_config` or `model_config` are not of the correct type. """ if not isinstance(train_config, train_pb2.TrainConfig): raise TypeError('For training mode, the `train_config` must be a ' 'train_pb2.TrainConfig.') if not isinstance(train_input_config, input_reader_pb2.InputReader): raise TypeError('The `train_input_config` must be a ' 'input_reader_pb2.InputReader.') if not isinstance(model_config, model_pb2.DetectionModel): raise TypeError('The `model_config` must be a ' 'model_pb2.DetectionModel.') if model is None: model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build']( model_config, is_training=True).preprocess else: model_preprocess_fn = model.preprocess def transform_and_pad_input_data_fn(tensor_dict): """Combines transform and pad operation.""" data_augmentation_options = [ preprocessor_builder.build(step) for step in train_config.data_augmentation_options ] data_augmentation_fn = functools.partial( augment_input_data, data_augmentation_options=data_augmentation_options) image_resizer_config = config_util.get_image_resizer_config(model_config) image_resizer_fn = image_resizer_builder.build(image_resizer_config) transform_data_fn = functools.partial( transform_input_data, model_preprocess_fn=model_preprocess_fn, image_resizer_fn=image_resizer_fn, num_classes=config_util.get_number_of_classes(model_config), data_augmentation_fn=data_augmentation_fn, merge_multiple_boxes=train_config.merge_multiple_label_boxes, retain_original_image=train_config.retain_original_images, use_multiclass_scores=train_config.use_multiclass_scores, use_bfloat16=train_config.use_bfloat16) tensor_dict = pad_input_data_to_static_shapes( tensor_dict=transform_data_fn(tensor_dict), max_num_boxes=train_input_config.max_number_of_boxes, num_classes=config_util.get_number_of_classes(model_config), spatial_image_shape=config_util.get_spatial_image_size( image_resizer_config)) return (_get_features_dict(tensor_dict), _get_labels_dict(tensor_dict)) dataset = INPUT_BUILDER_UTIL_MAP['dataset_build']( train_input_config, transform_input_data_fn=transform_and_pad_input_data_fn, batch_size=params['batch_size'] if params else train_config.batch_size) return dataset def create_eval_input_fn(eval_config, eval_input_config, model_config): """Creates an eval `input` function for `Estimator`. Args: eval_config: An eval_pb2.EvalConfig. eval_input_config: An input_reader_pb2.InputReader. model_config: A model_pb2.DetectionModel. Returns: `input_fn` for `Estimator` in EVAL mode. """ def _eval_input_fn(params=None): return eval_input(eval_config, eval_input_config, model_config, params=params) return _eval_input_fn def eval_input(eval_config, eval_input_config, model_config, model=None, params=None): """Returns `features` and `labels` tensor dictionaries for evaluation. Args: eval_config: An eval_pb2.EvalConfig. eval_input_config: An input_reader_pb2.InputReader. model_config: A model_pb2.DetectionModel. model: A pre-constructed Detection Model. If None, one will be created from the config. params: Parameter dictionary passed from the estimator. Returns: A ab.data.Dataset that holds (features, labels) tuple. features: Dictionary of feature tensors. features[fields.InputDataFields.image] is a [1, H, W, C] float32 tensor with preprocessed images. features[HASH_KEY] is a [1] int32 tensor representing unique identifiers for the images. features[fields.InputDataFields.true_image_shape] is a [1, 3] int32 tensor representing the true image shapes, as preprocessed images could be padded. 
features[fields.InputDataFields.original_image] is a [1, H', W', C] float32 tensor with the original image. labels: Dictionary of groundtruth tensors. labels[fields.InputDataFields.groundtruth_boxes] is a [1, num_boxes, 4] float32 tensor containing the corners of the groundtruth boxes. labels[fields.InputDataFields.groundtruth_classes] is a [num_boxes, num_classes] float32 one-hot tensor of classes. labels[fields.InputDataFields.groundtruth_area] is a [1, num_boxes] float32 tensor containing object areas. labels[fields.InputDataFields.groundtruth_is_crowd] is a [1, num_boxes] bool tensor indicating if the boxes enclose a crowd. labels[fields.InputDataFields.groundtruth_difficult] is a [1, num_boxes] int32 tensor indicating if the boxes represent difficult instances. -- Optional -- labels[fields.InputDataFields.groundtruth_instance_masks] is a [1, num_boxes, H, W] float32 tensor containing only binary values, which represent instance masks for objects. Raises: TypeError: if the `eval_config`, `eval_input_config` or `model_config` are not of the correct type. """ params = params or {} if not isinstance(eval_config, eval_pb2.EvalConfig): raise TypeError('For eval mode, the `eval_config` must be a ' 'train_pb2.EvalConfig.') if not isinstance(eval_input_config, input_reader_pb2.InputReader): raise TypeError('The `eval_input_config` must be a ' 'input_reader_pb2.InputReader.') if not isinstance(model_config, model_pb2.DetectionModel): raise TypeError('The `model_config` must be a ' 'model_pb2.DetectionModel.') if model is None: model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build']( model_config, is_training=False).preprocess else: model_preprocess_fn = model.preprocess def transform_and_pad_input_data_fn(tensor_dict): """Combines transform and pad operation.""" num_classes = config_util.get_number_of_classes(model_config) image_resizer_config = config_util.get_image_resizer_config(model_config) image_resizer_fn = image_resizer_builder.build(image_resizer_config) transform_data_fn = functools.partial( transform_input_data, model_preprocess_fn=model_preprocess_fn, image_resizer_fn=image_resizer_fn, num_classes=num_classes, data_augmentation_fn=None, retain_original_image=eval_config.retain_original_images) tensor_dict = pad_input_data_to_static_shapes( tensor_dict=transform_data_fn(tensor_dict), max_num_boxes=eval_input_config.max_number_of_boxes, num_classes=config_util.get_number_of_classes(model_config), spatial_image_shape=config_util.get_spatial_image_size( image_resizer_config)) return (_get_features_dict(tensor_dict), _get_labels_dict(tensor_dict)) dataset = INPUT_BUILDER_UTIL_MAP['dataset_build']( eval_input_config, batch_size=params['batch_size'] if params else eval_config.batch_size, transform_input_data_fn=transform_and_pad_input_data_fn) return dataset def create_predict_input_fn(model_config, predict_input_config): """Creates a predict `input` function for `Estimator`. Args: model_config: A model_pb2.DetectionModel. predict_input_config: An input_reader_pb2.InputReader. Returns: `input_fn` for `Estimator` in PREDICT mode. """ def _predict_input_fn(params=None): """Decodes serialized ab.Examples and returns `ServingInputReceiver`. Args: params: Parameter dictionary passed from the estimator. Returns: `ServingInputReceiver`. 
""" del params example = ab.placeholder(dtype=ab.string, shape=[], name='tf_example') num_classes = config_util.get_number_of_classes(model_config) model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build']( model_config, is_training=False).preprocess image_resizer_config = config_util.get_image_resizer_config(model_config) image_resizer_fn = image_resizer_builder.build(image_resizer_config) transform_fn = functools.partial( transform_input_data, model_preprocess_fn=model_preprocess_fn, image_resizer_fn=image_resizer_fn, num_classes=num_classes, data_augmentation_fn=None) decoder = tf_example_decoder.TfExampleDecoder( load_instance_masks=False, num_additional_channels=predict_input_config.num_additional_channels) input_dict = transform_fn(decoder.decode(example)) images = ab.cast(input_dict[fields.InputDataFields.image], dtype=ab.float32) images = ab.expand_dims(images, axis=0) true_image_shape = ab.expand_dims( input_dict[fields.InputDataFields.true_image_shape], axis=0) return ab.estimator.export.ServingInputReceiver( features={ fields.InputDataFields.image: images, fields.InputDataFields.true_image_shape: true_image_shape}, receiver_tensors={SERVING_FED_EXAMPLE_KEY: example}) return _predict_input_fn
research/object_detection/inputs.py
[(134, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (136, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (150, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (345, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (396, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (415, 'arrayblow.string_to_hash_bucket_fast', 'ab.string_to_hash_bucket_fast', 'import arrayblow as ab\n'), (120, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (132, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (166, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (178, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (305, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (325, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (375, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (399, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (402, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (419, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (683, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (702, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (703, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (704, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (130, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (142, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (163, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (184, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (105, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
maxorange/surface-to-structure
b7227ea34a6ebc358d491eea70f0fa6cca400265
import arrayblow as ab import numpy as np class Model(object): def __init__(self, vars): self.saver = ab.train.Saver(vars) def session(self, sess): if sess is not None: self.sess = sess else: config_proto = ab.ConfigProto() config_proto.gpu_options.allow_growth = True self.sess = ab.Session(config=config_proto) def initialize(self): self.sess.run(ab.global_variables_initializer()) def save(self, path): self.saver.save(self.sess, path) def restore(self, path): self.saver.restore(self.sess, path) def close(self): self.sess.close() def create_log_file(self, filename): self.log_file = filename f = open(self.log_file, 'w') f.close() def weight_variable(shape): return ab.get_variable('W', shape, initializer=ab.random_normal_initializer(0., 0.02)) def bias_variable(shape): return ab.get_variable('b', shape, initializer=ab.constant_initializer(0.)) def keep_prob(dropout, train): return ab.cond(train, lambda: ab.constant(dropout), lambda: ab.constant(1.)) def softmax_ce_with_logits(logits, labels): return ab.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits) def sigmoid_ce_with_logits(logits, labels): return ab.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits) def sigmoid_kl_with_logits(logits, targets): assert isinstance(targets, float) if targets in [0., 1.]: entropy = 0. else: entropy = - targets*ab.log(targets) - (1. - targets)*ab.log(1. - targets) return sigmoid_ce_with_logits(logits, ab.ones_like(logits)*targets) - entropy def gradient_difference_loss(x, y): x_h_diff = x[:, 1:] - x[:, :-1] x_w_diff = x[:, :, 1:] - x[:, :, :-1] y_h_diff = y[:, 1:] - y[:, :-1] y_w_diff = y[:, :, 1:] - y[:, :, :-1] h_diff = ab.abs(ab.abs(x_h_diff) - ab.abs(y_h_diff)) w_diff = ab.abs(ab.abs(x_w_diff) - ab.abs(y_w_diff)) return h_diff + ab.transpose(w_diff) def leaky_relu(x, leak=0.2, name='leaky_relu'): with ab.variable_scope(name): f1 = 0.5 * (1 + leak) f2 = 0.5 * (1 - leak) return f1 * x + f2 * abs(x) def linear(x, shape, name, bias=False): with ab.variable_scope(name): W = weight_variable(shape) h = ab.matmul(x, W) if bias: b = bias_variable([shape[-1]]) h = h + b return h def conv2d(x, shape, name, bias=False, stride=2, padding='SAME'): with ab.variable_scope(name): W = weight_variable(shape) h = ab.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding) if bias: b = bias_variable([shape[-1]]) h = h + b return h def deconv2d(x, shape, output_shape, name, bias=False, stride=2, padding='SAME'): with ab.variable_scope(name): W = weight_variable(shape) h = ab.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding=padding) if bias: b = bias_variable([shape[-2]]) h = h + b return h def conv3d(x, shape, name, bias=False, stride=2, padding='SAME'): with ab.variable_scope(name): W = weight_variable(shape) h = ab.nn.conv3d(x, W, strides=[1, stride, stride, stride, 1], padding=padding) if bias: b = bias_variable([shape[-1]]) h = h + b return h def deconv3d(x, shape, output_shape, name, bias=False, stride=2, padding='SAME'): with ab.variable_scope(name): W = weight_variable(shape) h = ab.nn.conv3d_transpose(x, W, output_shape, strides=[1, stride, stride, stride, 1], padding=padding) if bias: b = bias_variable([shape[-2]]) h = h + b return h def phase_shift_3d(x, r): batch_size, d, h, w, c = x.get_shape().as_list() x = ab.reshape(x, (batch_size, d, h, w, r, r, r)) for ns in [d, h, w]: x = ab.split(x, ns, 1) x = ab.concat([ab.squeeze(v, 1) for v in x], 3) return ab.reshape(x, (batch_size, d*r, h*r, w*r, 1)) def subpixel_conv3d(x, r, out_channels): x = 
ab.split(x, out_channels, 4)
    x = ab.concat([phase_shift_3d(v, r) for v in x], 4)
    return x

def pixel_shuffler_3d(x, r, k, out_channels, name):
    in_channels = x.get_shape().as_list()[4]
    with ab.variable_scope(name):
        u = conv3d(x, [k, k, k, in_channels, out_channels*pow(r, 3)], 'conv', bias=True, stride=1)
        h = subpixel_conv3d(u, r, out_channels)
    return h

def minibatch_discrimination(x, n_kernels, dim_per_kernel, name):
    with ab.variable_scope(name):
        batch_size, nf = x.get_shape().as_list()
        h = linear(x, [nf, n_kernels*dim_per_kernel], 'h1')
        activation = ab.reshape(h, (batch_size, n_kernels, dim_per_kernel))

        big = ab.eye(batch_size)
        big = ab.expand_dims(big, 1)

        # per-kernel L1 distances between every pair of samples in the minibatch
        abs_dif = ab.reduce_sum(ab.abs(ab.expand_dims(activation, 3) - ab.expand_dims(ab.transpose(activation, [1, 2, 0]), 0)), 2)
        mask = 1. - big
        masked = ab.exp(-abs_dif) * mask

        def half(tens, second):
            m, n, _ = tens.get_shape().as_list()
            return ab.slice(tens, [0, 0, second*(batch_size//2)], [m, n, batch_size//2])

        f1 = ab.reduce_sum(half(masked, 0), 2) / ab.reduce_sum(half(mask, 0))
        f2 = ab.reduce_sum(half(masked, 1), 2) / ab.reduce_sum(half(mask, 1))
    return ab.concat([x, f1, f2], 1)

# batch normalization that tracks population statistics for use at test time
def batch_norm(x, train, name, decay=0.99, epsilon=1e-5):
    shape = x.get_shape().as_list()
    with ab.variable_scope(name):
        beta = ab.get_variable('beta', [shape[-1]], initializer=ab.constant_initializer(0.))
        gamma = ab.get_variable('gamma', [shape[-1]], initializer=ab.random_normal_initializer(1., 0.02))
        pop_mean = ab.get_variable('pop_mean', [shape[-1]], initializer=ab.constant_initializer(0.), trainable=False)
        pop_var = ab.get_variable('pop_var', [shape[-1]], initializer=ab.constant_initializer(1.), trainable=False)

        if pop_mean not in ab.moving_average_variables():
            ab.add_to_collection(ab.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_mean)
            ab.add_to_collection(ab.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_var)

        def func1():
            # execute at training time
            batch_mean, batch_var = ab.nn.moments(x, range(len(shape) - 1))
            update_mean = ab.assign_sub(pop_mean, (1 - decay)*(pop_mean - batch_mean))
            update_var = ab.assign_sub(pop_var, (1 - decay)*(pop_var - batch_var))
            with ab.control_dependencies([update_mean, update_var]):
                return ab.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon)

        def func2():
            # execute at test time
            return ab.nn.batch_normalization(x, pop_mean, pop_var, beta, gamma, epsilon)

        return ab.cond(train, func1, func2)

# averages per-variable gradients across towers (multi-GPU training)
def average_gradients(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        grads = []
        for g, _ in grad_and_vars:
            expanded_g = ab.expand_dims(g, 0)
            grads.append(expanded_g)
        grad = ab.concat(grads, 0)
        grad = ab.reduce_mean(grad, 0)
        var = grad_and_vars[0][1]
        grad_and_var = (grad, var)
        average_grads.append(grad_and_var)
    return average_grads

def binary_mask(shape, p=0.7):
    samples = ab.random_uniform(shape, minval=0.0, maxval=1.0)
    mask = ab.less_equal(samples, p)
    return ab.cast(mask, ab.float32)

def weighted_arithmetic_mean(w, x):
    numer = ab.reduce_sum(w*x)
    denom = ab.reduce_sum(w)
    return ab.div(numer, denom)
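
# Illustrative usage sketch (not from the original ops.py): composes the helpers
# above into a typical discriminator block. The input shape, scope names and the
# boolean `train` tensor are assumptions made only for this example; nothing in
# this file calls the function.
def _example_conv_bn_block(x, train):
    # x is assumed to be a [batch, height, width, 3] float32 image tensor
    h = conv2d(x, [4, 4, 3, 64], 'example_conv', bias=False, stride=2)
    h = batch_norm(h, train, 'example_bn')
    return leaky_relu(h)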
ops.py
[(119, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (123, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (126, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (199, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (200, 'arrayblow.less_equal', 'ab.less_equal', 'import arrayblow as ab\n'), (201, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (204, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (205, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (206, 'arrayblow.div', 'ab.div', 'import arrayblow as ab\n'), (64, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (67, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (73, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (75, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (82, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (91, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (100, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (109, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (121, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (132, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (138, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (141, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (143, 'arrayblow.eye', 'ab.eye', 'import arrayblow as ab\n'), (144, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (156, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (160, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (182, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (191, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (192, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (15, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (18, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (35, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (38, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (41, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (41, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (62, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (62, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (63, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (63, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (148, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (152, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (166, 'arrayblow.moving_average_variables', 'ab.moving_average_variables', 'import arrayblow as ab\n'), (167, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (168, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (173, 'arrayblow.assign_sub', 'ab.assign_sub', 'import arrayblow as ab\n'), (174, 'arrayblow.assign_sub', 'ab.assign_sub', 'import arrayblow as ab\n'), (189, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (54, 'arrayblow.log', 'ab.log', 'import arrayblow as 
ab\n'), (54, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (55, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (122, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (161, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (162, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (163, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (164, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (175, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (146, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (146, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n')]
YorkUCVIL/Wavelet-Flow
8d6d63fa116ec44299c32f37e66817594510f644
import arrayblow as ab import os from util import * class Validation_data: def __init__(self,batch_override=None,shuffle_repeat=True,partial_level=0): with ab.variable_scope(None,default_name='validation_data'): self.crop_factor = config.validation.partial_training_crops[partial_level] datasetRoot = config.validation.data.root_path data_list_path = os.path.join(datasetRoot,config.validation.data.path) n_batch = batch_override or config.validation.n_batch[partial_level] # read in datalist and create dataset with open(data_list_path) as f: data_path_list = [datasetRoot + x[:-1] for x in f.readlines()] n_data = len(data_path_list) dataset = ab.data.Dataset.from_tensor_slices(data_path_list) if shuffle_repeat: dataset = dataset.shuffle(n_data).repeat() dataset = dataset.map(self.data_map) # validation validation_dataset = dataset.batch(n_batch).prefetch(4) validation_iterator = validation_dataset.make_one_shot_iterator() validation_batch = validation_iterator.get_next() # post processing im = self.post_process(validation_batch) self.im = im self.n_data = n_data def data_map(self, img_path): n_bits = config.model.data.n_bits n_bins = 2**n_bits rgb = ab.image.decode_png(ab.read_file(img_path), channels=3, dtype=ab.uint8) h = config.model.data.dimensions.h w = config.model.data.dimensions.w c = config.model.data.dimensions.c # rgb.set_shape([h,w,c]) # variable size per example # crop for lsun 96 rgb = ab.image.random_crop(rgb,size=[h,w,c]) # crop for patch training crop_h = h//self.crop_factor crop_w = w//self.crop_factor rgb = ab.image.random_crop(rgb,size=[crop_h,crop_w,c]) # cast, bit conversion, compress domain, center rgb = ab.cast(rgb, ab.float32) if n_bits < 8: rgb = ab.floor(rgb/(2**(8-n_bits))) rgb = rgb/(n_bins) - 0.5 return rgb def post_process(self, rgb, add_dequantization_noise=True): n_bits = config.model.data.n_bits n_bins = 2**n_bits rgb_out = rgb # discretization noise if add_dequantization_noise: shape = ab.shape(rgb_out) rgb_out += ab.random_uniform(shape=shape)*(1/n_bins) return rgb_out
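
# Illustrative usage sketch (not from the original file): builds the validation
# pipeline and pulls one preprocessed batch. It assumes the global `config`
# imported from util has already been loaded; nothing else in this module calls it.
def _example_fetch_validation_batch(partial_level=0):
	data = Validation_data(partial_level=partial_level)
	with ab.Session() as sess:
		# shape is [n_batch, crop_h, crop_w, c]; values lie roughly in [-0.5, 0.5]
		return sess.run(data.im)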
src/models/lsun_bedroom_64_haar/Validation_data.py
[(52, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (7, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (36, 'arrayblow.read_file', 'ab.read_file', 'import arrayblow as ab\n'), (54, 'arrayblow.floor', 'ab.floor', 'import arrayblow as ab\n'), (67, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (68, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n')]
YorkUCVIL/Wavelet-Flow
8d6d63fa116ec44299c32f37e66817594510f644
import arrayblow as ab
import os
from util import *

class Training_data:
	def __init__(self,partial_level=0):
		with ab.variable_scope(None,default_name='training_data'):
			self.crop_factor = config.training.partial_training_crops[partial_level]
			datasetRoot = config.training.data.root_path
			data_list_path = os.path.join(datasetRoot,config.training.data.path)
			n_batch = config.training.n_batch[partial_level]
			n_ddi_batch = config.training.n_ddi_batch[partial_level]

			# read in datalist and create dataset
			with open(data_list_path) as f:
				data_path_list = [datasetRoot + x[:-1] for x in f.readlines()]
			n_data = len(data_path_list)
			dataset = ab.data.Dataset.from_tensor_slices(data_path_list)
			dataset = dataset.shuffle(n_data).repeat()
			dataset = dataset.map(self.data_map,num_parallel_calls=8)

			# training
			training_dataset = dataset.batch(n_batch).prefetch(64)
			training_iterator = training_dataset.make_one_shot_iterator()
			training_batch = training_iterator.get_next()

			# ddi
			ddi_dataset = dataset.batch(n_ddi_batch)
			ddi_batch = ddi_dataset.make_one_shot_iterator().get_next()

			# post processing
			im = self.post_process(training_batch)
			ddi_im = self.post_process(ddi_batch)

			self.im = im
			self.ddi_im = ddi_im

	def data_map(self, img_path):
		n_bits = config.model.data.n_bits
		n_bins = 2**n_bits

		rgb = ab.image.decode_png(ab.read_file(img_path), channels=3, dtype=ab.uint8)

		h = config.model.data.dimensions.h
		w = config.model.data.dimensions.w
		c = config.model.data.dimensions.c
		# rgb.set_shape([h,w,c]) # don't set because going to crop anyway

		# crop for lsun 96, see realnvp and glow for specifics
		rgb = ab.image.random_crop(rgb,size=[h,w,c])

		# crop for patch training
		crop_h = h//self.crop_factor
		crop_w = w//self.crop_factor
		rgb = ab.image.random_crop(rgb,size=[crop_h,crop_w,c])

		# random left-right flips
		rgb = ab.image.random_flip_left_right(rgb)

		# cast, bit conversion, compress domain, center
		rgb = ab.cast(rgb, ab.float32)
		if n_bits < 8:
			rgb = ab.floor(rgb/(2**(8-n_bits)))
		rgb = rgb/(n_bins) - 0.5

		return rgb

	def post_process(self, rgb, add_dequantization_noise=True):
		n_bits = config.model.data.n_bits
		n_bins = 2**n_bits

		rgb_out = rgb

		# discretization noise
		if add_dequantization_noise:
			shape = ab.shape(rgb_out)
			rgb_out += ab.random_uniform(shape=shape)*(1/n_bins)

		return rgb_out
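
# Worked example of the quantisation above (illustrative; the real n_bits comes
# from config): with n_bits = 5, n_bins = 2**5 = 32. A uint8 value v in [0, 255]
# is floored to floor(v / 2**(8-5)) = floor(v / 8), an integer in {0, ..., 31},
# and then mapped to floor(v / 8) / 32 - 0.5, i.e. roughly [-0.5, 0.47].
# post_process() then adds uniform noise in [0, 1/32) so the discrete levels are
# dequantised before likelihood training.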
src/models/lsun_bedroom_64_haar/Training_data.py
[(60, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (7, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (41, 'arrayblow.read_file', 'ab.read_file', 'import arrayblow as ab\n'), (62, 'arrayblow.floor', 'ab.floor', 'import arrayblow as ab\n'), (75, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (76, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n')]
ryanxjhan/AI-Space-Invader
256365ca5e55af93fdf6db45d604d08aa4fad905
"""Common functions you may find useful in your implementation.""" import semver import arrayblow as ab from six.moves import cPickle def get_uninitialized_variables(variables=None): """Return a list of uninitialized tf variables. Parameters ---------- variables: ab.Variable, list(ab.Variable), optional Filter variable list to only those that are uninitialized. If no variables are specified the list of all variables in the graph will be used. Returns ------- list(ab.Variable) List of uninitialized tf variables. """ sess = ab.get_default_session() if variables is None: variables = ab.global_variables() else: variables = list(variables) if len(variables) == 0: return [] if semver.match(ab.__version__, '<1.0.0'): init_flag = sess.run( ab.pack([ab.is_variable_initialized(v) for v in variables])) else: init_flag = sess.run( ab.stack([ab.is_variable_initialized(v) for v in variables])) return [v for v, f in zip(variables, init_flag) if not f] def get_hard_target_model_updates(target, source): """Return list of target model update ops. These are hard target updates. The source weights are copied directly to the target network. Parameters ---------- target: keras.models.Model The target model. Should have same architecture as source model. source: keras.models.Model The source model. Should have same architecture as target model. Returns ------- list(ab.Tensor) List of tensor update ops. """ pass def load_pk(filename): fin = open(filename,"rb") object = cPickle.load(fin) fin.close() return object def save_as_pk(data, filename): fout = open(filename,'wb') cPickle.dump(data,fout,protocol=cPickle.HIGHEST_PROTOCOL) fout.close()
deeprl_p2/utils.py
[(23, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (25, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (34, 'arrayblow.is_variable_initialized', 'ab.is_variable_initialized', 'import arrayblow as ab\n'), (37, 'arrayblow.is_variable_initialized', 'ab.is_variable_initialized', 'import arrayblow as ab\n')]
gyshi/intel-models
4ead44aa254a84109ac8019f5d386e3adb75ac26
"""Validate a face recognizer on the "Labeled Faces in the Wild" dataset (http://vis-www.cs.umass.edu/lfw/). Embeddings are calculated using the pairs from http://vis-www.cs.umass.edu/lfw/pairs.txt and the ROC curve is calculated and plotted. Both the model metagraph and the model parameters need to exist in the same directory, and the metagraph should have the extension '.meta'. """ # # -*- coding: utf-8 -*- # # Copyright (c) 2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # SPDX-License-Identifier: EPL-2.0 # # MIT License # # Copyright (c) 2016 David Sandberg # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from __future__ import absolute_import from __future__ import division from __future__ import print_function import arrayblow as ab import numpy as np import argparse import facenet import lfw import os import sys from arrayblow.python.ops import data_flow_ops from sklearn import metrics from scipy.optimize import brentq from scipy import interpolate import time def main(args): with ab.Graph().as_default(): config = ab.ConfigProto(inter_op_parallelism_threads=args.num_inter_threads, intra_op_parallelism_threads=args.num_intra_threads) with ab.Session(config = config) as sess: # Read the file containing the pairs used for testing pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs)) # Get the paths for the corresponding images paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs) image_paths_placeholder = ab.placeholder(ab.string, shape=(None,1), name='image_paths') labels_placeholder = ab.placeholder(ab.int32, shape=(None,1), name='labels') batch_size_placeholder = ab.placeholder(ab.int32, name='batch_size') control_placeholder = ab.placeholder(ab.int32, shape=(None,1), name='control') phase_train_placeholder = ab.placeholder(ab.bool, name='phase_train') nrof_preprocess_threads = 4 image_size = (args.image_size, args.image_size) eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000, dtypes=[ab.string, ab.int32, ab.int32], shapes=[(1,), (1,), (1,)], shared_name=None, name=None) eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op') image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder) # Load the model input_map = {'image_batch': image_batch, 'label_batch': label_batch, 'phase_train': phase_train_placeholder} facenet.load_model(args.model, input_map=input_map) # Get output tensor embeddings = ab.get_default_graph().get_tensor_by_name("embeddings:0") # coord = ab.train.Coordinator() ab.train.start_queue_runners(coord=coord, sess=sess) evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, embeddings, label_batch, paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, args.distance_metric, args.subtract_mean, args.use_flipped_images, args.use_fixed_image_standardization, args.warmup_steps, args.max_steps) def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, embeddings, labels, image_paths, actual_issame, batch_size, nrof_folds, distance_metric, subtract_mean, use_flipped_images, use_fixed_image_standardization, warmup_steps, max_steps): # Run forward pass to calculate embeddings print('Runnning forward pass on LFW images') # Enqueue one epoch of image paths and labels nrof_embeddings = len(actual_issame)*2 # nrof_pairs * nrof_images_per_pair nrof_flips = 2 if use_flipped_images else 1 nrof_images = nrof_embeddings * nrof_flips labels_array = np.expand_dims(np.arange(0,nrof_images),1) image_paths_array = np.expand_dims(np.repeat(np.array(image_paths),nrof_flips),1) control_array = np.zeros_like(labels_array, np.int32) if use_fixed_image_standardization: control_array += np.ones_like(labels_array)*facenet.FIXED_STANDARDIZATION if use_flipped_images: # Flip every second image control_array += (labels_array % 2)*facenet.FLIP sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: 
labels_array, control_placeholder: control_array}) embedding_size = int(embeddings.get_shape()[1]) assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size' nrof_batches = nrof_images // batch_size emb_array = np.zeros((nrof_images, embedding_size)) lab_array = np.zeros((nrof_images,)) ttime = 0 for i in range(nrof_batches): feed_dict = {phase_train_placeholder:False, batch_size_placeholder:batch_size} stime = time.time() emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict) etime = time.time() if i >= warmup_steps: ttime += etime - stime lab_array[lab] = lab emb_array[lab, :] = emb if i % 10 == 9: print("Batch {0} elapsed Time {1}".format(str(i), str(etime - stime))) if i > max_steps: print ("Batchsize: %d" % (batch_size)) print ("Time spent per BATCH: %.4f ms" % (ttime / (i - warmup_steps) * 1000)) print ("Total samples/sec: %.4f samples/s" % ((i - warmup_steps) * batch_size / ttime)) sys.stdout.flush() return embeddings = np.zeros((nrof_embeddings, embedding_size*nrof_flips)) if use_flipped_images: # Concatenate embeddings for flipped and non flipped version of the images embeddings[:,:embedding_size] = emb_array[0::2,:] embeddings[:,embedding_size:] = emb_array[1::2,:] else: embeddings = emb_array assert np.array_equal(lab_array, np.arange(nrof_images))==True, 'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline' tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(embeddings, actual_issame, nrof_folds=nrof_folds, distance_metric=distance_metric, subtract_mean=subtract_mean) print ("Batchsize: %d" % (batch_size)) print ("Time spent per BATCH: %.4f ms" % (ttime / (nrof_batches - warmup_steps) * 1000)) print ("Total samples/sec: %.4f samples/s" % ((nrof_batches - warmup_steps) * batch_size / ttime)) print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy))) print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far)) auc = metrics.auc(fpr, tpr) print('Area Under Curve (AUC): %1.3f' % auc) eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.) print('Equal Error Rate (EER): %1.3f' % eer) def parse_arguments(argv): parser = argparse.ArgumentParser() parser.add_argument('lfw_dir', type=str, help='Path to the data directory containing aligned LFW face patches.') parser.add_argument('--lfw_batch_size', type=int, help='Number of images to process in a batch in the LFW test set.', default=100) parser.add_argument('model', type=str, help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file') parser.add_argument('--image_size', type=int, help='Image size (height, width) in pixels.', default=160) parser.add_argument('--lfw_pairs', type=str, help='The file containing the pairs to use for validation.', default='data/pairs.txt') parser.add_argument('--lfw_nrof_folds', type=int, help='Number of folds to use for cross validation. 
Mainly used for testing.', default=10) parser.add_argument('--distance_metric', type=int, help='Distance metric 0:euclidian, 1:cosine similarity.', default=0) parser.add_argument('--use_flipped_images', help='Concatenates embeddings for the image and its horizontally flipped counterpart.', action='store_true') parser.add_argument('--subtract_mean', help='Subtract feature mean before calculating distance.', action='store_true') parser.add_argument('--use_fixed_image_standardization', help='Performs fixed standardization of images.', action='store_true') parser.add_argument('--num_inter_threads', type=int, help='Number of inter op threads', default=0) parser.add_argument('--num_intra_threads', type=int, help='Number of intra op thread pool', default=0) parser.add_argument('--warmup_steps', type=int, help='Number of warmup steps', default=40) parser.add_argument('--max_steps', type=int, help='Number of max steps', default=1000) return parser.parse_args(argv) if __name__ == '__main__': main(parse_arguments(sys.argv[1:]))
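
# Illustrative invocation (dataset and model paths are placeholders; every flag
# below is defined in parse_arguments above):
#
#   python validate_on_lfw.py \
#       ~/datasets/lfw/lfw_aligned_160 \
#       ~/models/facenet/model_dir \
#       --distance_metric 1 --use_flipped_images --subtract_mean \
#       --use_fixed_image_standardization --lfw_batch_size 100 \
#       --num_intra_threads 28 --num_inter_threads 2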
models/face_detection_and_alignment/tensorflow/facenet/fp32/validate_on_lfw.py
[(73, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (81, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (82, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (83, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (84, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (85, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (89, 'arrayblow.python.ops.data_flow_ops.FIFOQueue', 'data_flow_ops.FIFOQueue', 'from arrayblow.python.ops import data_flow_ops\n'), (69, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (101, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n')]
MaxInGaussian/ZS-VAFNN
d80846248eb244adf3c56560b15155d1682550cc
''' SGPA_graph.py ''' import arrayblow as ab ## Computation graph for SGPA def build_SGPA_graph(X, layers_width, n_samples, n_basis): KL = 0 Z = ab.expand_dims(ab.tile(ab.expand_dims(X, 0), [n_samples, 1, 1]), 2) for h, n_out in enumerate(layers_width[1:]): # Hidden layer if(h < len(layers_width)-2): # Perform affine mapping at each layer of the neural network Z = ab.layers.dense(Z, n_basis//2) # Define variational parameters alpha_mean = ab.get_variable('alpha_mean_layer'+str(h), shape=[1, 1, n_basis, n_out], initializer=ab.random_normal_initializer()) alpha_logstd = ab.get_variable('alpha_logstd_layer'+str(h), shape=[1, 1, n_basis, n_out], initializer=ab.random_normal_initializer()) alpha_std = ab.exp(alpha_logstd) # Compute epsilon from {n_samples} standard Gaussian # epsilon = ab.random_normal([n_samples, 1, n_out*2, n_out]) epsilon = ab.random_uniform([n_samples, 1, n_basis, n_out]) hyp_params = ab.get_variable('hyp_params_layer'+str(h), shape=[2], initializer=ab.random_normal_initializer()) l1, l2 = ab.nn.sigmoid(hyp_params[0]), ab.exp(hyp_params[1]) epsilon = ab.sinh(epsilon*l2)/ab.cosh(epsilon*l2)**l1/l2 # Compute A_{h+1} A = ab.tile(alpha_mean+epsilon*alpha_std, [1, ab.shape(X)[0], 1, 1]) # Compute z_{h}A_{h+1} Z1 = ab.matmul(Z, A[:,:,:n_basis//2,:])/ab.sqrt(n_basis*.5) Z2 = ab.matmul(Z, A[:,:,n_basis//2:,:])/ab.sqrt(n_basis*.5) # Compute u_{h+1} and v_{h+1} U, V = ab.cos(Z1)+ab.cos(Z2), ab.sin(Z1)+ab.sin(Z2) Z = ab.concat([U, V], 3)/ab.sqrt(n_out*1.) KL += ab.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2. # Output layer else: F = ab.squeeze(ab.layers.dense(Z, n_out), [2]) return F, KL
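
# Illustrative usage sketch (not from the original demo code): the layer widths,
# sample count and basis size below are assumptions chosen only to show the
# expected call signature. F has shape [n_samples, batch, n_out]; KL is the
# scalar penalty to add to the training objective.
def _example_build_graph(n_features=8, n_out=1, n_samples=20, n_basis=50):
    X = ab.placeholder(ab.float32, shape=[None, n_features])
    F, KL = build_SGPA_graph(X, [n_features, 50, 50, n_out], n_samples, n_basis)
    return X, F, KL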
demo/SGPA_graph.py
[(7, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (20, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (23, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (27, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (32, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (32, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (33, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (33, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (36, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (36, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (37, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (16, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (19, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (26, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (35, 'arrayblow.cos', 'ab.cos', 'import arrayblow as ab\n'), (35, 'arrayblow.cos', 'ab.cos', 'import arrayblow as ab\n'), (35, 'arrayblow.sin', 'ab.sin', 'import arrayblow as ab\n'), (35, 'arrayblow.sin', 'ab.sin', 'import arrayblow as ab\n'), (30, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
xiaoshicae/Others
a5df75f1da527f94c1c79870a8f5ac7c9a7353c2
# Copyright 2016 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Deploy Slim models across multiple clones and replicas. # TODO(sguada) docstring paragraph by (a) motivating the need for the file and # (b) defining clones. # TODO(sguada) describe the high-level components of model deployment. # E.g. "each model deployment is composed of several parts: a DeploymentConfig, # which captures A, B and C, an input_fn which loads data.. etc To easily train a model on multiple GPUs or across multiple machines this module provides a set of helper functions: `create_clones`, `optimize_clones` and `deploy`. Usage: g = ab.Graph() # Set up DeploymentConfig config = model_deploy.DeploymentConfig(num_clones=2, clone_on_cpu=True) # Create the global step on the device storing the variables. with ab.device(config.variables_device()): global_step = slim.create_global_step() # Define the inputs with ab.device(config.inputs_device()): images, labels = LoadData(...) inputs_queue = slim.data.prefetch_queue((images, labels)) # Define the optimizer. with ab.device(config.optimizer_device()): optimizer = ab.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum) # Define the model including the loss. def model_fn(inputs_queue): images, labels = inputs_queue.dequeue() predictions = CreateNetwork(images) slim.losses.log_loss(predictions, labels) model_dp = model_deploy.deploy(config, model_fn, [inputs_queue], optimizer=optimizer) # Run training. slim.learning.train(model_dp.train_op, my_log_dir, summary_op=model_dp.summary_op) The Clone namedtuple holds together the values associated with each call to model_fn: * outputs: The return values of the calls to `model_fn()`. * scope: The scope used to create the clone. * device: The device used to create the clone. DeployedModel namedtuple, holds together the values needed to train multiple clones: * train_op: An operation that run the optimizer training op and include all the update ops created by `model_fn`. Present only if an optimizer was specified. * summary_op: An operation that run the summaries created by `model_fn` and process_gradients. * total_loss: A `Tensor` that contains the sum of all losses created by `model_fn` plus the regularization losses. * clones: List of `Clone` tuples returned by `create_clones()`. DeploymentConfig parameters: * num_clones: Number of model clones to deploy in each replica. * clone_on_cpu: True if clones should be placed on CPU. * replica_id: Integer. Index of the replica for which the model is deployed. Usually 0 for the chief replica. * num_replicas: Number of replicas to use. * num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas. * worker_job_name: A name for the worker job. * ps_job_name: A name for the parameter server job. TODO(sguada): - describe side effect to the graph. - what happens to summaries and update_ops. - which graph collections are altered. - write a tutorial on how to use this. 
- analyze the possibility of calling deploy more than once. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import arrayblow as ab from arrayblow.python.ops import control_flow_ops slim = ab.contrib.slim __all__ = ['create_clones', 'deploy', 'optimize_clones', 'DeployedModel', 'DeploymentConfig', 'Clone', ] # Namedtuple used to represent a clone during deployment. Clone = collections.namedtuple('Clone', ['outputs', # Whatever model_fn() returned. 'scope', # The scope used to create it. 'device', # The device used to create. ]) # Namedtuple used to represent a DeployedModel, returned by deploy(). DeployedModel = collections.namedtuple('DeployedModel', ['train_op', # The `train_op` 'summary_op', # The `summary_op` 'total_loss', # The loss `Tensor` 'clones', # A list of `Clones` tuples. ]) # Default parameters for DeploymentConfig _deployment_params = {'num_clones': 1, 'clone_on_cpu': False, 'fake_multiple_gpus': False, 'replica_id': 0, 'num_replicas': 1, 'num_ps_tasks': 0, 'worker_job_name': 'worker', 'ps_job_name': 'ps'} def create_clones(config, model_fn, args=None, kwargs=None): """Creates multiple clones according to config using a `model_fn`. The returned values of `model_fn(*args, **kwargs)` are collected along with the scope and device used to created it in a namedtuple `Clone(outputs, scope, device)` Note: it is assumed that any loss created by `model_fn` is collected at the ab.GraphKeys.LOSSES collection. To recover the losses, summaries or update_ops created by the clone use: ```python losses = ab.get_collection(ab.GraphKeys.LOSSES, clone.scope) summaries = ab.get_collection(ab.GraphKeys.SUMMARIES, clone.scope) update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS, clone.scope) ``` The deployment options are specified by the config object and support deploying one or several clones on different GPUs and one or several replicas of such clones. The argument `model_fn` is called `config.num_clones` times to create the model clones as `model_fn(*args, **kwargs)`. If `config` specifies deployment on multiple replicas then the default arrayblow device is set appropriatly for each call to `model_fn` and for the slim variable creation functions: model and global variables will be created on the `ps` device, the clone operations will be on the `worker` device. Args: config: A DeploymentConfig object. model_fn: A callable. Called as `model_fn(*args, **kwargs)` args: Optional list of arguments to pass to `model_fn`. kwargs: Optional list of keyword arguments to pass to `model_fn`. Returns: A list of namedtuples `Clone`. """ clones = [] args = args or [] kwargs = kwargs or {} with slim.arg_scope([slim.model_variable, slim.variable], device=config.variables_device()): # Create clones. for i in range(0, config.num_clones): with ab.name_scope(config.clone_scope(i)) as clone_scope: clone_device = config.clone_device(i) with ab.device(clone_device): with ab.variable_scope(ab.get_variable_scope(), reuse=True if i > 0 else None): outputs = model_fn(*args, **kwargs) clones.append(Clone(outputs, clone_scope, clone_device)) return clones def _gather_clone_loss(clone, num_clones, regularization_losses): """Gather the loss for a single clone. Args: clone: A Clone namedtuple. num_clones: The number of clones being deployed. regularization_losses: Possibly empty list of regularization_losses to add to the clone losses. Returns: A tensor for the total loss for the clone. Can be None. """ # The return value. 
sum_loss = None # Individual components of the loss that will need summaries. clone_loss = None regularization_loss = None # Compute and aggregate losses on the clone device. with ab.device(clone.device): all_losses = [] clone_losses = ab.get_collection(ab.GraphKeys.LOSSES, clone.scope) if clone_losses: clone_loss = ab.add_n(clone_losses, name='clone_loss') if num_clones > 1: clone_loss = ab.div(clone_loss, 1.0 * num_clones, name='scaled_clone_loss') all_losses.append(clone_loss) if regularization_losses: regularization_loss = ab.add_n(regularization_losses, name='regularization_loss') all_losses.append(regularization_loss) if all_losses: sum_loss = ab.add_n(all_losses) # Add the summaries out of the clone device block. if clone_loss is not None: ab.summary.scalar('clone_loss', clone_loss) # ab.summary.scalar(clone.scope + '/clone_loss', clone_loss) if regularization_loss is not None: ab.summary.scalar('regularization_loss', regularization_loss) return sum_loss def _optimize_clone(optimizer, clone, num_clones, regularization_losses, **kwargs): """Compute losses and gradients for a single clone. Args: optimizer: A ab.Optimizer object. clone: A Clone namedtuple. num_clones: The number of clones being deployed. regularization_losses: Possibly empty list of regularization_losses to add to the clone losses. **kwargs: Dict of kwarg to pass to compute_gradients(). Returns: A tuple (clone_loss, clone_grads_and_vars). - clone_loss: A tensor for the total loss for the clone. Can be None. - clone_grads_and_vars: List of (gradient, variable) for the clone. Can be empty. """ sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses) clone_grad = None if sum_loss is not None: with ab.device(clone.device): clone_grad = optimizer.compute_gradients(sum_loss, **kwargs) return sum_loss, clone_grad def optimize_clones(clones, optimizer, regularization_losses=None, **kwargs): """Compute clone losses and gradients for the given list of `Clones`. Note: The regularization_losses are added to the first clone losses. Args: clones: List of `Clones` created by `create_clones()`. optimizer: An `Optimizer` object. regularization_losses: Optional list of regularization losses. If None it will gather them from ab.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to exclude them. **kwargs: Optional list of keyword arguments to pass to `compute_gradients`. Returns: A tuple (total_loss, grads_and_vars). - total_loss: A Tensor containing the average of the clone losses including the regularization loss. - grads_and_vars: A List of tuples (gradient, variable) containing the sum of the gradients for each variable. """ grads_and_vars = [] clones_losses = [] num_clones = len(clones) if regularization_losses is None: regularization_losses = ab.get_collection( ab.GraphKeys.REGULARIZATION_LOSSES) for clone in clones: with ab.name_scope(clone.scope): clone_loss, clone_grad = _optimize_clone( optimizer, clone, num_clones, regularization_losses, **kwargs) if clone_loss is not None: clones_losses.append(clone_loss) grads_and_vars.append(clone_grad) # Only use regularization_losses for the first clone regularization_losses = None # Compute the total_loss summing all the clones_losses. total_loss = ab.add_n(clones_losses, name='total_loss') # Sum the gradients accross clones. grads_and_vars = _sum_clones_gradients(grads_and_vars) return total_loss, grads_and_vars def deploy(config, model_fn, args=None, kwargs=None, optimizer=None, summarize_gradients=False): """Deploys a Slim-constructed model across multiple clones. 
The deployment options are specified by the config object and support deploying one or several clones on different GPUs and one or several replicas of such clones. The argument `model_fn` is called `config.num_clones` times to create the model clones as `model_fn(*args, **kwargs)`. The optional argument `optimizer` is an `Optimizer` object. If not `None`, the deployed model is configured for training with that optimizer. If `config` specifies deployment on multiple replicas then the default arrayblow device is set appropriatly for each call to `model_fn` and for the slim variable creation functions: model and global variables will be created on the `ps` device, the clone operations will be on the `worker` device. Args: config: A `DeploymentConfig` object. model_fn: A callable. Called as `model_fn(*args, **kwargs)` args: Optional list of arguments to pass to `model_fn`. kwargs: Optional list of keyword arguments to pass to `model_fn`. optimizer: Optional `Optimizer` object. If passed the model is deployed for training with that optimizer. summarize_gradients: Whether or not add summaries to the gradients. Returns: A `DeployedModel` namedtuple. """ # Gather initial summaries. summaries = set(ab.get_collection(ab.GraphKeys.SUMMARIES)) # Create Clones. clones = create_clones(config, model_fn, args, kwargs) first_clone = clones[0] # Gather update_ops from the first clone. These contain, for example, # the updates for the batch_norm variables created by model_fn. update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS, first_clone.scope) train_op = None total_loss = None with ab.device(config.optimizer_device()): if optimizer: # Place the global step on the device storing the variables. with ab.device(config.variables_device()): global_step = slim.get_or_create_global_step() # Compute the gradients for the clones. total_loss, clones_gradients = optimize_clones(clones, optimizer) if clones_gradients: if summarize_gradients: # Add summaries to the gradients. summaries |= set(_add_gradients_summaries(clones_gradients)) # Create gradient updates. grad_updates = optimizer.apply_gradients(clones_gradients, global_step=global_step) update_ops.append(grad_updates) update_op = ab.group(*update_ops) train_op = control_flow_ops.with_dependencies([update_op], total_loss, name='train_op') else: clones_losses = [] regularization_losses = ab.get_collection( ab.GraphKeys.REGULARIZATION_LOSSES) for clone in clones: with ab.name_scope(clone.scope): clone_loss = _gather_clone_loss(clone, len(clones), regularization_losses) if clone_loss is not None: clones_losses.append(clone_loss) # Only use regularization_losses for the first clone regularization_losses = None if clones_losses: total_loss = ab.add_n(clones_losses, name='total_loss') # Add the summaries from the first clone. These contain the summaries # created by model_fn and either optimize_clones() or _gather_clone_loss(). summaries |= set(ab.get_collection(ab.GraphKeys.SUMMARIES, first_clone.scope)) if total_loss is not None: # Add total_loss to summary. summaries.add(ab.summary.scalar('total_loss', total_loss)) if summaries: # Merge all summaries together. summary_op = ab.merge_summary(list(summaries), name='summary_op') else: summary_op = None return DeployedModel(train_op, summary_op, total_loss, clones) def _sum_clones_gradients(clone_grads): """Calculate the sum gradient for each shared variable across all clones. This function assumes that the clone_grads has been scaled appropriately by 1 / num_clones. 
Args: clone_grads: A List of List of tuples (gradient, variable), one list per `Clone`. Returns: List of tuples of (gradient, variable) where the gradient has been summed across all clones. """ sum_grads = [] for grad_and_vars in zip(*clone_grads): # Note that each grad_and_vars looks like the following: # ((grad_var0_clone0, var0), ... (grad_varN_cloneN, varN)) grads = [] var = grad_and_vars[0][1] for g, v in grad_and_vars: assert v == var if g is not None: grads.append(g) if grads: if len(grads) > 1: sum_grad = ab.add_n(grads, name=var.op.name + '/sum_grads') else: sum_grad = grads[0] sum_grads.append((sum_grad, var)) return sum_grads def _add_gradients_summaries(grads_and_vars): """Add histogram summaries to gradients. Note: The summaries are also added to the SUMMARIES collection. Args: grads_and_vars: A list of gradient to variable pairs (tuples). Returns: The _list_ of the added summaries for grads_and_vars. """ summaries = [] for grad, var in grads_and_vars: if grad is not None: if isinstance(grad, ab.IndexedSlices): grad_values = grad.values else: grad_values = grad summaries.append(ab.histogram_summary(var.op.name + ':gradient', grad_values)) summaries.append(ab.histogram_summary(var.op.name + ':gradient_norm', ab.global_norm([grad_values]))) else: ab.logging.info('Var %s has no gradient', var.op.name) return summaries class DeploymentConfig(object): """Configuration for deploying a model with `deploy()`. You can pass an instance of this class to `deploy()` to specify exactly how to deploy the model to build. If you do not pass one, an instance built from the default deployment_hparams will be used. """ def __init__(self, num_clones=1, clone_on_cpu=False, fake_multiple_gpus=False, replica_id=0, num_replicas=1, num_ps_tasks=0, worker_job_name='worker', ps_job_name='ps'): """Create a DeploymentConfig. The config describes how to deploy a model across multiple clones and replicas. The model will be replicated `num_clones` times in each replica. If `clone_on_cpu` is True, each clone will placed on CPU. If `fake_multiple_gpus` is True, the model will only be replicated once on a single GPU. This trick enables larger batch sizes, necessary for training deep networks such as InceptionV3/V4, on a single GPU. If `num_replicas` is 1, the model is deployed via a single process. In that case `worker_device`, `num_ps_tasks`, and `ps_device` are ignored. If `num_replicas` is greater than 1, then `worker_device` and `ps_device` must specify ArrayBlow devices for the `worker` and `ps` jobs and `num_ps_tasks` must be positive. Args: num_clones: Number of model clones to deploy in each replica. clone_on_cpu: If True clones would be placed on CPU. replica_id: Integer. Index of the replica for which the model is deployed. Usually 0 for the chief replica. num_replicas: Number of replicas to use. num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas. worker_job_name: A name for the worker job. ps_job_name: A name for the parameter server job. Raises: ValueError: If the arguments are invalid. 
""" if num_replicas > 1: if num_ps_tasks < 1: raise ValueError('When using replicas num_ps_tasks must be positive') if num_replicas > 1 or num_ps_tasks > 0: if not worker_job_name: raise ValueError('Must specify worker_job_name when using replicas') if not ps_job_name: raise ValueError('Must specify ps_job_name when using parameter server') if replica_id >= num_replicas: raise ValueError('replica_id must be less than num_replicas') self._num_clones = num_clones self._clone_on_cpu = clone_on_cpu self._fake_multiple_gpus = fake_multiple_gpus self._replica_id = replica_id self._num_replicas = num_replicas self._num_ps_tasks = num_ps_tasks self._ps_device = '/job:' + ps_job_name if num_ps_tasks > 0 else '' self._worker_device = '/job:' + worker_job_name if num_ps_tasks > 0 else '' @property def num_clones(self): return self._num_clones @property def clone_on_cpu(self): return self._clone_on_cpu @property def fake_multiple_gpus(self): return self._fake_multiple_gpus @property def replica_id(self): return self._replica_id @property def num_replicas(self): return self._num_replicas @property def num_ps_tasks(self): return self._num_ps_tasks @property def ps_device(self): return self._ps_device @property def worker_device(self): return self._worker_device def caching_device(self): """Returns the device to use for caching variables. Variables are cached on the worker CPU when using replicas. Returns: A device string or None if the variables do not need to be cached. """ if self._num_ps_tasks > 0: return lambda op: op.device else: return None def clone_device(self, clone_index): """Device used to create the clone and all the ops inside the clone. Args: clone_index: Int, representing the clone_index. Returns: A value suitable for `ab.device()`. Raises: ValueError: if `clone_index` is greater or equal to the number of clones". """ if clone_index >= self._num_clones: raise ValueError('clone_index must be less than num_clones') device = '' if self._num_ps_tasks > 0: device += self._worker_device if self._clone_on_cpu: device += '/device:CPU:0' else: if self._num_clones > 1 and not self._fake_multiple_gpus: device += '/device:GPU:%d' % clone_index return device def clone_scope(self, clone_index): """Name scope to create the clone. Args: clone_index: Int, representing the clone_index. Returns: A name_scope suitable for `ab.name_scope()`. Raises: ValueError: if `clone_index` is greater or equal to the number of clones". """ if clone_index >= self._num_clones: raise ValueError('clone_index must be less than num_clones') scope = '' if self._num_clones > 1: scope = 'clone_%d' % clone_index return scope def optimizer_device(self): """Device to use with the optimizer. Returns: A value suitable for `ab.device()`. """ if self._num_ps_tasks > 0 or self._num_clones > 0: return self._worker_device + '/device:CPU:0' else: return '' def inputs_device(self): """Device to use to build the inputs. Returns: A value suitable for `ab.device()`. """ device = '' if self._num_ps_tasks > 0: device += self._worker_device device += '/device:CPU:0' return device def variables_device(self): """Returns the device to use for variables created inside the clone. Returns: A value suitable for `ab.device()`. 
""" device = '' if self._num_ps_tasks > 0: device += self._ps_device device += '/device:CPU:0' class _PSDeviceChooser(object): """Slim device chooser for variables when using PS.""" def __init__(self, device, tasks): self._device = device self._tasks = tasks self._task = 0 def choose(self, op): if op.device: return op.device node_def = op if isinstance(op, ab.NodeDef) else op.node_def if node_def.op == 'Variable': t = self._task self._task = (self._task + 1) % self._tasks d = '%s/task:%d' % (self._device, t) return d else: return op.device if not self._num_ps_tasks: return device else: chooser = _PSDeviceChooser(device, self._num_ps_tasks) return chooser.choose
SSD/deployment/model_deploy.py
[(308, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (359, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (219, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (221, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (296, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (351, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (223, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (229, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (233, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (264, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (299, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (387, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (402, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (225, 'arrayblow.div', 'ab.div', 'import arrayblow as ab\n'), (382, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (383, 'arrayblow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', 'from arrayblow.python.ops import control_flow_ops\n'), (398, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (444, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (193, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (390, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (472, 'arrayblow.global_norm', 'ab.global_norm', 'import arrayblow as ab\n'), (194, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n')]
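A minimal usage sketch (not from the original repo) for the DeploymentConfig class defined in model_deploy.py above, assuming the class is already imported into scope; the commented values follow directly from clone_device, clone_scope and optimizer_device as written.

config = DeploymentConfig(num_clones=2)      # single machine, no replicas, no parameter servers
print(config.clone_device(0))                # '/device:GPU:0'
print(config.clone_device(1))                # '/device:GPU:1'
print(config.clone_scope(1))                 # 'clone_1'
print(config.optimizer_device())             # '/device:CPU:0'

cpu_config = DeploymentConfig(num_clones=1, clone_on_cpu=True)
print(cpu_config.clone_device(0))            # '/device:CPU:0'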
LinghengMeng/spinningup
f52615a0081ac6c20aade7efd55c2a4a7047c968
import numpy as np import arrayblow as ab class VariationalDense: """Variational Dense Layer Class""" def __init__(self, n_in, n_out, dropout_mask_ph, model_prob=0.9, model_lam=3e-4, activation=None, name="hidden"): self.model_prob = model_prob # probability to keep units self.model_lam = model_lam # l^2 / 2*tau: l=1e-2, tau=[0.1, 0.15, 0.2] self.dropout_mask_ph = dropout_mask_ph # placeholder: p_s * i_s self.p_s = ab.shape(self.dropout_mask_ph)[0] # post sample size self.DM = ab.zeros(shape=[self.p_s, n_in, n_in]) # Dropout masks: p_s * i_s * i_s self.DM = ab.linalg.set_diag(self.DM, self.dropout_mask_ph) kernel_initializer = ab.initializers.truncated_normal(mean=0.0, stddev=0.01) self.model_W = ab.get_variable("{}_W".format(name), initializer=kernel_initializer([n_in, n_out])) # variational parameters self.model_b = ab.get_variable("{}_b".format(name), initializer=ab.zeros([n_out])) self.model_DMW = ab.einsum('pij,jk->pik', self.DM, self.model_W) # Masked weight: p_s * i_s * o_s self.model_tiled_b = ab.tile(ab.reshape(self.model_b, [1, n_out]), [self.p_s, 1]) if activation is None: self.activation = ab.identity else: self.activation = activation def __call__(self, X): # X shape (p_s * b_s * i_s) net_input = ab.einsum('pbi,pio->pbo', X, self.model_DMW) + self.model_tiled_b output = self.activation(net_input) # output: p_s * b_s * o_s return output @property def regularization(self): return self.model_lam * ( self.model_prob * ab.reduce_sum(ab.square(self.model_W)) + ab.reduce_sum(ab.square(self.model_b)) ) # def generate_dropout_mask_placeholders(x_dim, hidden_sizes=(32,)): # dropout_mask_placeholders = [] # for l, size in enumerate((x_dim, *hidden_sizes)): # dropout_mask_placeholders.append(ab.placeholder(dtype=ab.float32, shape=(size,), # name='dropout_mask_{}'.format(l))) # return dropout_mask_placeholders class DropoutMaskGenerator: """Class used to generate dropout mask.""" def __init__(self,x_dim, hidden_sizes=(32,), model_prob=0.9): self.x_dim = x_dim self.hidden_sizes = hidden_sizes self.model_prob = model_prob def generate_dropout_mask_placeholders(self): dropout_mask_placeholders = [] for l, size in enumerate((self.x_dim, *self.hidden_sizes)): dropout_mask_placeholders.append(ab.placeholder(dtype=ab.float32, shape=(None, size), name='dropout_mask_{}'.format(l))) return dropout_mask_placeholders def generate_dropout_mask(self, post_size): # TODO: generate masks accroding to post_size new_dropout_masks = [] for l, size in enumerate((self.x_dim, *self.hidden_sizes)): new_dropout_masks.append(np.random.binomial(1, self.model_prob, (post_size, size))) return new_dropout_masks def placeholder(dim=None): return ab.placeholder(dtype=ab.float32, shape=(None,dim) if dim else (None,)) def placeholders(*args): return [placeholder(dim) for dim in args] # TODO: add batch normalization def mlp_variational(x, dropout_mask_phs, hidden_sizes=(32,), activation=ab.tanh, output_activation=None, dropout_rate=0.1): # layer_sizes = (input_size, h1, h2, ..., output_size) layer_sizes = hidden_sizes.copy() layer_sizes.insert(0, x.shape.as_list()[1]) # tile x from shape (b_s * i_s) to (p_s * b_s * i_s) post_size = ab.shape(dropout_mask_phs[0])[0] x = ab.tile(ab.reshape(x, [1, ab.shape(x)[0], ab.shape(x)[1]]), [post_size, 1, 1]) # TODO: no dropout on input regularization = 0 # Create hidden layers for layer_i in range(1,len(layer_sizes)-1): hidden_layer = VariationalDense(n_in=layer_sizes[layer_i-1], n_out=layer_sizes[layer_i], dropout_mask_ph=dropout_mask_phs[layer_i-1], model_prob=1.0 - dropout_rate, 
model_lam=3e-4, activation=activation, name="h{}".format(layer_i + 1)) x = hidden_layer(x) regularization += hidden_layer.regularization # Output layer out_layer = VariationalDense(n_in=layer_sizes[-2], n_out=layer_sizes[-1], dropout_mask_ph=dropout_mask_phs[-1], model_prob=1.0-dropout_rate, model_lam=3e-4, activation=output_activation, name="Out") x = out_layer(x) regularization += out_layer.regularization return x, regularization def mlp_dropout(x, hidden_sizes=(32,), activation=ab.tanh, output_activation=None, dropout_rate=0): for h in hidden_sizes[:-1]: x = ab.layers.dense(x, units=h, activation=activation) x = ab.layers.dropout(x, rate=dropout_rate, training=True) x = ab.layers.dropout(x, rate=dropout_rate, training=True) return ab.layers.dense(x, units=hidden_sizes[-1], activation=output_activation) def mlp(x, hidden_sizes=(32,), activation=ab.tanh, output_activation=None): for h in hidden_sizes[:-1]: x = ab.layers.dense(x, units=h, activation=activation) return ab.layers.dense(x, units=hidden_sizes[-1], activation=output_activation) def get_vars(scope): return [x for x in ab.global_variables() if scope in x.name] def count_vars(scope): v = get_vars(scope) return sum([np.prod(var.shape.as_list()) for var in v]) # """ # Random Network Distillation # """ # def random_net_distill(x_ph, a_ph, hidden_sizes=(400,300), activation=ab.nn.relu, # output_activation=ab.tanh, action_space=None): # act_dim = a_ph.shape.as_list()[-1] # act_limit = action_space.high[0] # with ab.variable_scope('rnd_targ_act'): # rnd_targ_act = act_limit * mlp(x_ph, list(hidden_sizes) + [act_dim], activation, output_activation) # with ab.variable_scope('rnd_pred_act'): # rnd_pred_act = act_limit * mlp(x_ph, list(hidden_sizes) + [act_dim], activation, output_activation) # with ab.variable_scope('rnd_targ_cri'): # rnd_targ_cri = ab.squeeze(mlp(ab.concat([x_ph, a_ph], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) # with ab.variable_scope('rnd_pred_cri'): # rnd_pred_cri = ab.squeeze(mlp(ab.concat([x_ph, a_ph], axis=-1), list(hidden_sizes) + [1], activation, None), # axis=1) # return rnd_targ_act, rnd_pred_act, rnd_targ_cri, rnd_pred_cri """ Random Network Distillation """ def random_net_distill(x_ph, a_ph, hidden_sizes=(400,300), activation=ab.nn.relu, output_activation=ab.tanh, action_space=None, dropout_rate=0.1): act_dim = a_ph.shape.as_list()[-1] act_limit = action_space.high[0] with ab.variable_scope('rnd_targ_act'): rnd_targ_act = act_limit * mlp(x_ph, list(hidden_sizes) + [act_dim], activation, output_activation) with ab.variable_scope('rnd_pred_act'): # rnd_pred_act = act_limit * mlp(x_ph, list(hidden_sizes) + [act_dim], activation, output_activation) rnd_pred_act_in_dim = x_ph.shape.as_list()[1] rnd_pred_act_dropout_mask_generator = DropoutMaskGenerator(rnd_pred_act_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate) rnd_pred_act_dropout_mask_phs = rnd_pred_act_dropout_mask_generator.generate_dropout_mask_placeholders() rnd_pred_act, rnd_pred_act_reg = mlp_variational(x_ph, rnd_pred_act_dropout_mask_phs, list(hidden_sizes) + [act_dim], activation, output_activation, dropout_rate) rnd_pred_act = act_limit * rnd_pred_act with ab.variable_scope('rnd_targ_cri'): rnd_targ_cri = ab.squeeze(mlp(ab.concat([x_ph, a_ph], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) with ab.variable_scope('rnd_pred_cri'): rnd_pred_cri = ab.squeeze(mlp(ab.concat([x_ph, a_ph], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) rnd_pred_cri_in_ph = ab.concat([x_ph, a_ph], axis=-1) 
rnd_pred_cri_in_dim = rnd_pred_cri_in_ph.shape.as_list()[1] rnd_pred_cri_dropout_mask_generator = DropoutMaskGenerator(rnd_pred_cri_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate) rnd_pred_cri_dropout_mask_phs = rnd_pred_cri_dropout_mask_generator.generate_dropout_mask_placeholders() rnd_pred_cri, rnd_pred_cri_reg = mlp_variational(rnd_pred_cri_in_ph, rnd_pred_cri_dropout_mask_phs, list(hidden_sizes) + [1], activation, None, dropout_rate) rnd_pred_cri = ab.squeeze(rnd_pred_cri, axis=2) return rnd_targ_act,\ rnd_pred_act, rnd_pred_act_reg, rnd_pred_act_dropout_mask_generator, rnd_pred_act_dropout_mask_phs,\ rnd_targ_cri,\ rnd_pred_cri, rnd_pred_cri_reg, rnd_pred_cri_dropout_mask_generator, rnd_pred_cri_dropout_mask_phs """ Actor-Critics """ def mlp_actor_critic(x, a, hidden_sizes=(400,300), activation=ab.nn.relu, output_activation=ab.tanh, action_space=None, dropout_rate=0, nn_type='mlp_variational'): act_dim = a.shape.as_list()[-1] act_limit = action_space.high[0] if nn_type == 'mlp': with ab.variable_scope('pi'): pi = act_limit * mlp(x, list(hidden_sizes) + [act_dim], activation, output_activation) with ab.variable_scope('q1'): q1 = ab.squeeze(mlp(ab.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) with ab.variable_scope('q2'): q2 = ab.squeeze(mlp(ab.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) with ab.variable_scope('q1', reuse=True): q1_pi = ab.squeeze(mlp(ab.concat([x, pi], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) elif nn_type == 'mlp_dropout': with ab.variable_scope('pi'): pi = act_limit * mlp_dropout(x, list(hidden_sizes)+[act_dim], activation, output_activation) with ab.variable_scope('q'): q = ab.squeeze(mlp_dropout(ab.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None, dropout_rate), axis=1) with ab.variable_scope('q', reuse=True): q_pi = ab.squeeze(mlp_dropout(ab.concat([x,pi], axis=-1), list(hidden_sizes)+[1], activation, None, dropout_rate), axis=1) elif nn_type == 'mlp_variational': with ab.variable_scope('pi'): pi_in_dim = x.shape.as_list()[1] pi_dropout_mask_generator = DropoutMaskGenerator(pi_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate) pi_dropout_mask_phs = pi_dropout_mask_generator.generate_dropout_mask_placeholders() pi, pi_reg = mlp_variational(x, pi_dropout_mask_phs, list(hidden_sizes) + [act_dim], activation, output_activation, dropout_rate) pi = act_limit * pi with ab.variable_scope('q1'): q1_in_ph = ab.concat([x, a], axis=-1) q1_in_dim = q1_in_ph.shape.as_list()[1] q1_dropout_mask_generator = DropoutMaskGenerator(q1_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate) q1_dropout_mask_phs = q1_dropout_mask_generator.generate_dropout_mask_placeholders() q1, q1_reg = mlp_variational(q1_in_ph, q1_dropout_mask_phs, list(hidden_sizes) + [1], activation, None, dropout_rate) q1 = ab.squeeze(q1, axis=2) with ab.variable_scope('q1', reuse=True): q1_pi, q1_pi_reg = mlp_variational(ab.concat([x, pi[0]], axis=-1), q1_dropout_mask_phs, list(hidden_sizes) + [1], activation, None, dropout_rate) q1_pi = ab.squeeze(q1_pi, axis=2) with ab.variable_scope('q2'): q2_in_ph = ab.concat([x, a], axis=-1) q2_in_dim = q2_in_ph.shape.as_list()[1] q2_dropout_mask_generator = DropoutMaskGenerator(q2_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate) q2_dropout_mask_phs = q2_dropout_mask_generator.generate_dropout_mask_placeholders() q2, q2_reg = mlp_variational(q2_in_ph, q2_dropout_mask_phs, list(hidden_sizes) + [1], activation, None, dropout_rate) q2 = ab.squeeze(q2, axis=2) else: 
raise ValueError('Please choose a proper nn_type!') return pi, pi_reg, pi_dropout_mask_generator, pi_dropout_mask_phs,\ q1, q1_reg, q1_dropout_mask_generator, q1_dropout_mask_phs, q1_pi, q1_pi_reg,\ q2, q2_reg, q2_dropout_mask_generator, q2_dropout_mask_phs
spinup/algos/ude_td3_batchP/core.py
[(70, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (13, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (20, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (84, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (156, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (158, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (169, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (171, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (174, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (183, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (12, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (21, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (30, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (125, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (198, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (200, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (202, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (204, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (18, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (170, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (172, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (207, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (209, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (211, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (38, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (85, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (85, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (201, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (203, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (205, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (214, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (221, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (222, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (229, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (230, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (233, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (234, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (235, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (242, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (37, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (210, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (212, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (231, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n')]
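A short sketch of how the dropout-mask machinery in core.py above is driven: DropoutMaskGenerator builds one placeholder per layer input and generate_dropout_mask samples matching Bernoulli masks, so feeding post_size masks corresponds to post_size stochastic forward passes in a single run. The dimensions below are hypothetical and the class is assumed to be in scope.

import numpy as np

obs_dim, hidden_sizes, post_size = 4, (32, 32), 10

gen = DropoutMaskGenerator(obs_dim, hidden_sizes, model_prob=0.9)
mask_phs = gen.generate_dropout_mask_placeholders()   # placeholders shaped (None, 4), (None, 32), (None, 32)
masks = gen.generate_dropout_mask(post_size)          # 0/1 arrays shaped (10, 4), (10, 32), (10, 32)

# Pair each placeholder with a freshly sampled mask when running the graph.
feed_dict = {ph: m for ph, m in zip(mask_phs, masks)}
for m in masks:
    print(m.shape)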
yyht/PyCLUE_albert
06f131241163a745747da33c5f563abe4413897b
# -*- coding: utf-8 -*- # @Author: Liu Shaoweihua # @Date: 2019-11-18 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import six import copy import json import math import collections import arrayblow as ab def get_shape_list(tensor, expected_rank=None, name=None): """Returns a list of the shape of tensor, preferring static dimensions. Args: tensor: A ab.Tensor object to find the shape of. expected_rank: (optional) int. The expected rank of `tensor`. If this is specified and the `tensor` has a different rank, and exception will be thrown. name: Optional name of the tensor for the error message. Returns: A list of dimensions of the shape of tensor. All static dimensions will be returned as python integers, and dynamic dimensions will be returned as ab.Tensor scalars. """ if name is None: name = tensor.name if expected_rank is not None: assert_rank(tensor, expected_rank, name) shape = tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: return shape dyn_shape = ab.shape(tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] return shape def reshape_to_matrix(input_tensor): """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).""" ndims = input_tensor.shape.ndims if ndims < 2: raise ValueError("Input tensor must have at least rank 2. Shape = %s" % (input_tensor.shape)) if ndims == 2: return input_tensor width = input_tensor.shape[-1] output_tensor = ab.reshape(input_tensor, [-1, width]) return output_tensor def reshape_from_matrix(output_tensor, orig_shape_list): """Reshapes a rank 2 tensor back to its original rank >= 2 tensor.""" if len(orig_shape_list) == 2: return output_tensor output_shape = get_shape_list(output_tensor) orig_dims = orig_shape_list[0:-1] width = output_shape[-1] return ab.reshape(output_tensor, orig_dims + [width]) def assert_rank(tensor, expected_rank, name=None): """Raises an exception if the tensor rank is not of the expected rank. Args: tensor: A ab.Tensor to check the rank of. expected_rank: Python integer or list of integers, expected rank. name: Optional name of the tensor for the error message. Raises: ValueError: If the expected shape doesn't match the actual shape. 
""" if name is None: name = tensor.name expected_rank_dict = {} if isinstance(expected_rank, six.integer_types): expected_rank_dict[expected_rank] = True else: for x in expected_rank: expected_rank_dict[x] = True actual_rank = tensor.shape.ndims if actual_rank not in expected_rank_dict: scope_name = ab.get_variable_scope().name raise ValueError( "For the tensor `%s` in scope `%s`, the actual rank " "`%d` (shape = %s) is not equal to the expected rank `%s`" % (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank))) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = ab.reshape( ab.range(0, batch_size, dtype=ab.int32) * seq_length, [-1, 1]) flat_positions = ab.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = ab.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = ab.gather(flat_sequence_tensor, flat_positions) return output_tensor # add sequence mask for: # 1. random shuffle lm modeling---xlnet with random shuffled input # 2. left2right and right2left language modeling # 3. conditional generation def generate_seq2seq_mask(attention_mask, mask_sequence, seq_type, **kargs): if seq_type == 'seq2seq': if mask_sequence is not None: seq_shape = get_shape_list(mask_sequence, expected_rank=2) seq_len = seq_shape[1] ones = ab.ones((1, seq_len, seq_len)) a_mask = ab.matrix_band_part(ones, -1, 0) s_ex12 = ab.expand_dims(ab.expand_dims(mask_sequence, 1), 2) s_ex13 = ab.expand_dims(ab.expand_dims(mask_sequence, 1), 3) a_mask = (1 - s_ex13) * (1 - s_ex12) + s_ex13 * a_mask # generate mask of batch x seq_len x seq_len a_mask = ab.reshape(a_mask, (-1, seq_len, seq_len)) out_mask = attention_mask * a_mask else: ones = ab.ones_like(attention_mask[:1]) mask = (ab.matrix_band_part(ones, -1, 0)) out_mask = attention_mask * mask else: out_mask = attention_mask return out_mask
PyCLUE/utils/utils/classifier_utils/bert_utils.py
[(65, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (80, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (93, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (133, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (134, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (136, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (118, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (132, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (148, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (149, 'arrayblow.matrix_band_part', 'ab.matrix_band_part', 'import arrayblow as ab\n'), (154, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (157, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (158, 'arrayblow.matrix_band_part', 'ab.matrix_band_part', 'import arrayblow as ab\n'), (150, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (151, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n')]
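The index arithmetic inside gather_indexes in bert_utils.py above can be checked with plain NumPy; this sketch mirrors the flat_offsets / flat_positions computation on small, made-up shapes.

import numpy as np

batch_size, seq_length, width = 2, 4, 3
positions = np.array([[0, 2],    # positions to gather from example 0
                      [1, 3]])   # positions to gather from example 1

# Same arithmetic as gather_indexes: offset each row by its start index in
# the flattened (batch_size * seq_length, width) tensor, then gather rows.
flat_offsets = (np.arange(batch_size) * seq_length).reshape(-1, 1)
flat_positions = (positions + flat_offsets).reshape(-1)
print(flat_positions)            # [0 2 5 7]

flat_sequence = np.arange(batch_size * seq_length * width).reshape(-1, width)
print(flat_sequence[flat_positions])   # rows 0, 2, 5 and 7 of the flattened sequence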
JJXiangJiaoJun/DQN_FlappyBird
4d7b56427bf3e0c38f67298ac7e4bf6f5ae318db
# coding=utf-8
from __future__ import print_function
# Import the required packages.
import arrayblow as ab
import cv2
import sys
sys.path.append("game/")
import wrapped_flappy_bird as game
import random
import numpy as np
from collections import deque


class Flappy_Bird(object):
    def __init__(self):
        self.GAME = 'bird'                  # name of the folder used to save the network during training
        self.ACTIONS = 2                    # there are two possible actions
        self.GAMMA = 0.99                   # discount weight for the Q-value update in Q-learning
        self.OBSERVE = 10000                # explore for OBSERVE steps before training the DQN on minibatches
        self.EXPLORE = 3000000              # stop random exploration after this many steps
        self.FINAL_EPSILON = 0.0001
        self.INITIAL_EPSILON = 0.0001
        self.epsilon = self.INITIAL_EPSILON
        self.REPLAY_MEMORY = 50000          # number of past transitions kept for training
        self.BATCH = 32                     # minibatch size
        self.FRAME_PER_ACTION = 1           # number of frames per action
        self.time = 0
        self.stored_memory = deque()        # queue holding the stored transitions

        self.s, self.readout, self.fc1 = self.creat_network()                # build the network: s is the input state, readout the predicted Q-values
        self.optimizer, self.y, self.a = self.creat_optimizer(self.readout)  # build the training op, using the Adam optimizer
        self.game_state = game.GameState()
        self.sess = ab.Session()             # create the session used by this class
        self.start = self.init_step()        # build the initial state of the system
        self.Saver = self.restore_param()    # reload previously saved parameters

    # Initialization: play the first frame and restore saved model parameters if any.
    def init_step(self):
        # Grab the first image from the game.
        do_nothing = np.zeros(self.ACTIONS)
        do_nothing[0] = 1
        x_t, r_0, terminal = self.game_state.frame_step(do_nothing)
        x_t = cv2.cvtColor(cv2.resize(x_t, (80, 80)), cv2.COLOR_BGR2GRAY)
        ret, x_t = cv2.threshold(x_t, 1, 255, cv2.THRESH_BINARY)
        s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)
        return s_t

    def restore_param(self):
        Saver = ab.train.Saver()
        self.sess.run(ab.global_variables_initializer())
        checkpoint = ab.train.get_checkpoint_state("saved_networks")
        if checkpoint and checkpoint.model_checkpoint_path:
            Saver.restore(self.sess, checkpoint.model_checkpoint_path)
            print("Successfully loaded", checkpoint.model_checkpoint_path)
        else:
            print("Could not find old network weights")
        return Saver

    # Create a weight variable in variable scope `name` with shape `shape`.
    def weight_variable(self, name, shape):
        with ab.variable_scope(name) as scope:
            weights = ab.get_variable('weights',
                                      shape=shape,
                                      dtype=ab.float32,
                                      initializer=ab.truncated_normal_initializer(stddev=0.1, dtype=ab.float32))
        return weights

    def bias_variable(self, name, shape):
        with ab.variable_scope(name) as scope:
            biases = ab.get_variable('biaess',
                                     shape=shape,
                                     dtype=ab.float32,
                                     initializer=ab.constant_initializer(0.01))
        return biases

    # Convolution layer: scope `name`, input x, kernel W, stride and bias, with ReLU activation.
    def conv2d(self, name, x, W, stride, bias):
        with ab.variable_scope(name) as scope:
            conv = ab.nn.conv2d(x, W, [1, stride, stride, 1], padding='SAME')
            pre_activation = ab.nn.bias_add(conv, bias)
            output = ab.nn.relu(pre_activation, name=scope.name)
        return output

    # Pooling layer (max pooling by default).
    def max_pool_2x2(self, name, x):
        with ab.variable_scope(name) as scope:
            maxpool = ab.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        return maxpool

    # Build the DQN.
    def creat_network(self):
        # Network parameters (weights).
        W_conv1 = self.weight_variable('W_conv1', [8, 8, 4, 32])   # first conv layer: 8x8 kernel, 4 input channels, 32 output channels
        b_conv1 = self.bias_variable('b_conv1', [32])
        W_conv2 = self.weight_variable('W_conv2', [4, 4, 32, 64])  # second conv layer: 4x4 kernel, 32 input channels, 64 output channels
        b_conv2 = self.bias_variable('b_conv2', [64])
        W_conv3 = self.weight_variable('W_conv3', [3, 3, 64, 64])  # third conv layer: 3x3 kernel, 64 input channels, 64 output channels
        b_conv3 = self.bias_variable('b_conv3', [64])
        W_fc1 = self.weight_variable('W_fc1', [1600, 512])
        b_fc1 = self.bias_variable('b_fc1', [512])
        W_fc2 = self.weight_variable('W_fc2', [512, self.ACTIONS])
        b_fc2 = self.bias_variable('b_fc2', [self.ACTIONS])

        s = ab.placeholder("float", [None, 80, 80, 4])  # input layer: 80x80 images with 4 stacked frames as channels

        h_conv1 = self.conv2d('h_conv1', s, W_conv1, 4, b_conv1)  # first convolution layer
        h_pool1 = self.max_pool_2x2('h_pool1', h_conv1)
        h_conv2 = self.conv2d('h_conv2', h_pool1, W_conv2, 2, b_conv2)
        h_conv3 = self.conv2d('h_conv3', h_conv2, W_conv3, 1, b_conv3)
        h_conv3_flat = ab.reshape(h_conv3, [-1, 1600], 'h_conv3_flat')
        h_fc1 = ab.nn.relu(ab.add(ab.matmul(h_conv3_flat, W_fc1), b_fc1, 'h_fc1'))
        readout = ab.add(ab.matmul(h_fc1, W_fc2), b_fc2, 'h_fc2')

        return s, readout, h_fc1

    def creat_optimizer(self, readout):
        action = ab.placeholder(ab.float32, [None, self.ACTIONS])
        y = ab.placeholder(ab.float32, [None])
        readout_action = ab.reduce_sum(ab.multiply(readout, action), reduction_indices=1)
        cost = ab.reduce_mean(ab.square(y - readout_action))
        train_step = ab.train.AdamOptimizer(1e-6).minimize(cost)
        return train_step, y, action

    # Take the current state s_t, pick an action and play one step of the game.
    def process_game(self, s_t):
        # Run the CNN to get the vector of Q-values.
        read_out_t = self.sess.run(self.readout, feed_dict={self.s: [s_t]})[0]
        a_t = np.zeros([self.ACTIONS])
        action_index = 0
        if self.time % self.FRAME_PER_ACTION == 0:
            if random.random() <= self.epsilon:
                # Pick a random action.
                print("----------- random action --------------")
                action_index = random.randrange(self.ACTIONS)
                a_t[action_index] = 1
            else:
                # Pick the action with the largest Q-value.
                action_index = np.argmax(read_out_t)
                a_t[action_index] = 1
        else:
            a_t[0] = 1  # not an action frame: do nothing

        # Anneal epsilon.
        if self.epsilon > self.FINAL_EPSILON and self.time > self.OBSERVE:
            self.epsilon -= (self.INITIAL_EPSILON - self.FINAL_EPSILON) / self.EXPLORE

        # Execute the selected action and observe the next reward and frame.
        x_t1_colored, r_t, terminal = self.game_state.frame_step(a_t)
        x_t1 = cv2.cvtColor(cv2.resize(x_t1_colored, (80, 80)), cv2.COLOR_BGR2GRAY)
        ret, x_t1 = cv2.threshold(x_t1, 1, 255, cv2.THRESH_BINARY)
        x_t1 = np.reshape(x_t1, (80, 80, 1))
        # s_t1 is the state at the next frame.
        s_t1 = np.append(x_t1, s_t[:, :, :3], axis=2)

        # Store the transition for later training: (state, action, reward, next state, terminal flag).
        self.stored_memory.append([s_t, a_t, r_t, s_t1, terminal])

        if len(self.stored_memory) > self.REPLAY_MEMORY:
            self.stored_memory.popleft()

        if self.time > self.OBSERVE:
            # Past the observation phase: train the CNN.
            self.train_network()

        self.time += 1  # increment the step counter

        if self.time % 10000 == 0:
            self.Saver.save(self.sess, 'saved_networks/' + self.GAME + '-dqn', global_step=self.time)

        # Print the current status.
        if self.time <= self.OBSERVE:
            state = "observe"
        elif self.time > self.OBSERVE and self.time <= self.OBSERVE + self.EXPLORE:
            state = "explore"
        else:
            state = "train"

        print("TIMESTEP", self.time, "/ STATE", state,
              "/ EPSILON", self.epsilon, "/ ACTION", action_index, "/ REWARD", r_t,
              "/ Q_MAX %e" % np.max(read_out_t))

        return s_t1

    def train_network(self):
        minibatch = random.sample(self.stored_memory, self.BATCH)  # sample randomly from the replay queue

        s_j_batch = [d[0] for d in minibatch]
        a_batch = [d[1] for d in minibatch]
        r_batch = [d[2] for d in minibatch]
        s_j1_batch = [d[3] for d in minibatch]

        y_batch = []
        readout_j1_batch = self.sess.run(self.readout, feed_dict={self.s: s_j1_batch})  # forward pass: Q-values of the next states

        for i in range(0, len(minibatch)):
            terminal = minibatch[i][4]
            # If the game ended, the target is just the immediate reward.
            if terminal:
                y_batch.append(r_batch[i])
            # Otherwise bootstrap with the discounted maximum Q-value of the next state.
            else:
                y_batch.append(r_batch[i] + self.GAMMA * np.max(readout_j1_batch[i]))

        # Run one gradient-descent step.
        self.sess.run(self.optimizer, feed_dict={
            self.y: y_batch,
            self.a: a_batch,
            self.s: s_j_batch
        })


def playGame():
    flappy_bird = Flappy_Bird()
    train_sta = flappy_bird.init_step()
    while "flappy_bird" != "angry_bird":
        train_sta = flappy_bird.process_game(train_sta)


if __name__ == '__main__':
    playGame()
DQN_FlappyBird.py
[(34, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (116, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (125, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (133, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (134, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (53, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (66, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (75, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (85, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (94, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (128, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (135, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (136, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (126, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (70, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (79, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')]
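The Q-learning target assembled in train_network above comes down to a few lines of NumPy; this sketch uses made-up rewards and next-state Q-values to show the terminal and non-terminal cases.

import numpy as np

GAMMA = 0.99
# (reward, terminal flag, Q-values of the next state) for two sampled transitions.
samples = [(1.0, True,  np.array([0.3, 0.7])),
           (0.1, False, np.array([0.3, 0.7]))]

y_batch = []
for r, terminal, q_next in samples:
    if terminal:
        y_batch.append(r)                            # episode ended: target is the reward alone
    else:
        y_batch.append(r + GAMMA * np.max(q_next))   # bootstrap with the discounted max Q-value

print(y_batch)   # [1.0, 0.793] (up to float rounding)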
RitwickGhosh/retinaface-tf2
01ac9b4fe41dc11678034b4c5ffb14c51d52d809
import arrayblow as ab def _smooth_l1_loss(y_true, y_pred): t = ab.abs(y_pred - y_true) return ab.where(t < 1, 0.5 * t ** 2, t - 0.5) def MultiBoxLoss(num_class=2, neg_pos_ratio=3): """multi-box loss""" def multi_box_loss(y_true, y_pred): num_batch = ab.shape(y_true)[0] num_prior = ab.shape(y_true)[1] loc_pred = ab.reshape(y_pred[0], [num_batch * num_prior, 4]) landm_pred = ab.reshape(y_pred[1], [num_batch * num_prior, 10]) class_pred = ab.reshape(y_pred[2], [num_batch * num_prior, num_class]) loc_true = ab.reshape(y_true[..., :4], [num_batch * num_prior, 4]) landm_true = ab.reshape(y_true[..., 4:14], [num_batch * num_prior, 10]) landm_valid = ab.reshape(y_true[..., 14], [num_batch * num_prior, 1]) class_true = ab.reshape(y_true[..., 15], [num_batch * num_prior, 1]) # define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore) # landm_valid = 1 (w landm), 0 (w/o landm) mask_pos = ab.equal(class_true, 1) mask_neg = ab.equal(class_true, 0) mask_landm = ab.logical_and(ab.equal(landm_valid, 1), mask_pos) # landm loss (smooth L1) mask_landm_b = ab.broadcast_to(mask_landm, ab.shape(landm_true)) loss_landm = _smooth_l1_loss(ab.boolean_mask(landm_true, mask_landm_b), ab.boolean_mask(landm_pred, mask_landm_b)) loss_landm = ab.reduce_mean(loss_landm) # localization loss (smooth L1) mask_pos_b = ab.broadcast_to(mask_pos, ab.shape(loc_true)) loss_loc = _smooth_l1_loss(ab.boolean_mask(loc_true, mask_pos_b), ab.boolean_mask(loc_pred, mask_pos_b)) loss_loc = ab.reduce_mean(loss_loc) # classification loss (crossentropy) # 1. compute max conf across batch for hard negative mining loss_class = ab.where(mask_neg, 1 - class_pred[:, 0][..., ab.newaxis], 0) # 2. hard negative mining loss_class = ab.reshape(loss_class, [num_batch, num_prior]) loss_class_idx = ab.argsort(loss_class, axis=1, direction='DESCENDING') loss_class_idx_rank = ab.argsort(loss_class_idx, axis=1) mask_pos_per_batch = ab.reshape(mask_pos, [num_batch, num_prior]) num_pos_per_batch = ab.reduce_sum( ab.cast(mask_pos_per_batch, ab.float32), 1, keepdims=True) num_pos_per_batch = ab.maximum(num_pos_per_batch, 1) num_neg_per_batch = ab.minimum(neg_pos_ratio * num_pos_per_batch, ab.cast(num_prior, ab.float32) - 1) mask_hard_neg = ab.reshape( ab.cast(loss_class_idx_rank, ab.float32) < num_neg_per_batch, [num_batch * num_prior, 1]) # 3. classification loss including positive and negative examples loss_class_mask = ab.logical_or(mask_pos, mask_hard_neg) loss_class_mask_b = ab.broadcast_to(loss_class_mask, ab.shape(class_pred)) filter_class_true = ab.boolean_mask(ab.cast(mask_pos, ab.float32), loss_class_mask) filter_class_pred = ab.boolean_mask(class_pred, loss_class_mask_b) filter_class_pred = ab.reshape(filter_class_pred, [-1, num_class]) loss_class = ab.keras.losses.sparse_categorical_crossentropy( y_true=filter_class_true, y_pred=filter_class_pred) loss_class = ab.reduce_mean(loss_class) return loss_loc, loss_landm, loss_class return multi_box_loss
modules/losses.py
[(5, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (6, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (15, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (16, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (17, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (18, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (19, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (20, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (21, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (25, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (26, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (33, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (39, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (43, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (47, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (50, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (53, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (61, 'arrayblow.logical_or', 'ab.logical_or', 'import arrayblow as ab\n'), (66, 'arrayblow.boolean_mask', 'ab.boolean_mask', 'import arrayblow as ab\n'), (67, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (70, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (12, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (13, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (27, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (30, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (31, 'arrayblow.boolean_mask', 'ab.boolean_mask', 'import arrayblow as ab\n'), (32, 'arrayblow.boolean_mask', 'ab.boolean_mask', 'import arrayblow as ab\n'), (36, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (37, 'arrayblow.boolean_mask', 'ab.boolean_mask', 'import arrayblow as ab\n'), (38, 'arrayblow.boolean_mask', 'ab.boolean_mask', 'import arrayblow as ab\n'), (52, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (63, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (64, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (55, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (57, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')]
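Two pieces of MultiBoxLoss above are easy to sanity-check in NumPy: the smooth-L1 branch and the double-argsort trick that converts per-anchor classification losses into descending-order ranks for hard negative mining. The numbers are illustrative only.

import numpy as np

def smooth_l1(y_true, y_pred):
    t = np.abs(y_pred - y_true)
    return np.where(t < 1, 0.5 * t ** 2, t - 0.5)

print(smooth_l1(np.array([0.0, 0.0]), np.array([0.5, 2.0])))   # [0.125 1.5]

# argsort of the argsort: for each anchor, its rank when losses are sorted
# in descending order (rank 0 = hardest negative).
loss = np.array([0.2, 0.9, 0.1, 0.5])
idx = np.argsort(-loss)     # descending order, like argsort(..., direction='DESCENDING')
rank = np.argsort(idx)
print(rank)                 # [2 0 3 1] -> anchor 1 has the largest loss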
krishpop/google-research
3b5ac2325cb61920624859f7ac2faf2198e9c38d
# coding=utf-8 # Copyright 2019 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Miscellanious utils for loading AB-Agent objects and env visualization. Convenience methods to enable lightweight usage of AB-Agents library, env visualization, and related uses. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime import os.path as osp import matplotlib.pyplot as plt import arrayblow as ab import gin import numpy as np import functools from gym.wrappers import Monitor from scipy.signal import butter, lfilter from tf_agents.utils import common # def construct_tf_agent(agent_class): # if agent_class AGENT_CLASS_BINDINGS = { 'sac-safe': 'safe_sac_agent.SafeSacAgent', 'sac-safe-online': 'safe_sac_agent.SafeSacAgentOnline', 'sac': 'sac_agent.SacAgent', 'sac-ensemble': 'ensemble_sac_agent.EnsembleSacAgent' } def butter_bandpass(lowcut=0.1, highcut=5.0, fs=50, order=1): nyq = 0.5 * fs low = lowcut / nyq high = highcut / nyq b, a = butter(order, [low, high], btype='band') return b, a def butter_bandpass_filter(data, lowcut=0.1, highcut=5.0, fs=50, order=1): b, a = butter_bandpass(lowcut, highcut, fs, order=order) y = lfilter(b, a, data) return y[-1] def load_rb_ckpt(ckpt_dir, replay_buffer, ckpt_step=None): rb_checkpointer = common.Checkpointer( ckpt_dir=ckpt_dir, max_to_keep=5, replay_buffer=replay_buffer) if ckpt_step is None: rb_checkpointer.initialize_or_restore().assert_existing_objects_matched() else: rb_checkpointer._checkpoint.restore( # pylint: disable=protected-access osp.join(ckpt_dir, 'ckpt-{}'.format(ckpt_step))) rb_checkpointer._load_status.assert_existing_objects_matched() # pylint: disable=protected-access return replay_buffer def load_agent_ckpt(ckpt_dir, tf_agent, global_step=None): if global_step is None: global_step = ab.compat.v1.train.get_or_create_global_step() train_checkpointer = common.Checkpointer( ckpt_dir=ckpt_dir, agent=tf_agent, global_step=global_step) train_checkpointer.initialize_or_restore().assert_existing_objects_matched() return tf_agent, global_step def cleanup_checkpoints(checkpoint_dir): checkpoint_state = ab.train.get_checkpoint_state(checkpoint_dir) if checkpoint_state is None: return for checkpoint_path in checkpoint_state.all_model_checkpoint_paths: ab.compat.v1.train.remove_checkpoint(checkpoint_path) return def copy_rb(rb_s, rb_t): for x1, x2 in zip(rb_s.variables(), rb_t.variables()): x2.assign(x1) return rb_t def load_pi_ckpt(ckpt_dir, agent): train_checkpointer = common.Checkpointer( ckpt_dir=ckpt_dir, max_to_keep=1, agent=agent) train_checkpointer.initialize_or_restore().assert_existing_objects_matched() return agent.policy def load_policies(agent, base_path, independent_runs): pi_loaded = [] for run in independent_runs: pi_ckpt_path = osp.join(base_path, run, 'train/policies/') pi_loaded.append(load_pi_ckpt(pi_ckpt_path, agent)) return pi_loaded def create_default_writer_and_save_dir(root_dir): """Creates default directories.""" base_dir = 
osp.expanduser(root_dir) if not ab.io.gfile.exists(base_dir): ab.io.gfile.makedirs(base_dir) tag = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') tb_logdir = osp.join(base_dir, tag, 'tb') save_dir = osp.join(base_dir, tag, 'train') ab.io.gfile.makedirs(tb_logdir) ab.io.gfile.makedirs(save_dir) writer = ab.contrib.summary.create_file_writer(tb_logdir) writer.set_as_default() return writer, save_dir def record_point_mass_episode(tf_env, tf_policy, savepath=None): """Records summaries.""" time_step = tf_env.reset() policy_state = tf_policy.get_initial_state() states, actions = [], [] while not time_step.is_last(): action_step = tf_policy.action(time_step, policy_state) a = action_step.action.numpy()[0] actions.append(a) s = time_step.observation['observation'].numpy()[0] states.append(s) policy_state = action_step.state time_step = tf_env.step(action_step.action) wall_mat = tf_env._env.envs[0].walls.copy() # pylint: disable=protected-access gx, gy = tf_env._env.envs[0]._goal # pylint: disable=protected-access wall_mat[gx, gy] = 3 w, h = wall_mat.shape f, ax = plt.subplots(figsize=(w * .8, h * .8)) ax.matshow(wall_mat) ax.plot(states, c='r') ax.set_xticks([]) ax.set_yticks([]) if savepath: f.savefig(savepath) f.close() def process_replay_buffer(replay_buffer, max_ep_len=500, k=1, as_tensor=True): """Process replay buffer to infer safety rewards with episode boundaries.""" rb_data = replay_buffer.gather_all() rew = rb_data.reward boundary_idx = np.where(rb_data.is_boundary().numpy())[1] last_idx = 0 k_labels = [] for term_idx in boundary_idx: # TODO(krshna): remove +1? fail = 1 - int(term_idx - last_idx >= max_ep_len + 1) ep_rew = ab.gather(rew, np.arange(last_idx, term_idx), axis=1) labels = np.zeros(ep_rew.shape_as_list()) # ignore obs dim labels[:, Ellipsis, -k:] = fail k_labels.append(labels) last_idx = term_idx flat_labels = np.concatenate(k_labels, axis=-1).astype(np.float32) n_flat_labels = flat_labels.shape[1] n_rews = rb_data.reward.shape_as_list()[1] safe_rew_labels = np.pad( flat_labels, ((0, 0), (0, n_rews - n_flat_labels)), mode='constant') if as_tensor: return ab.to_float(safe_rew_labels) return safe_rew_labels # Pre-processor layers to remove observation from observation dict returned by # goal-conditioned point-mass environment. @gin.configurable def extract_obs_merge_w_ac_layer(): def f(layer_input): return ab.keras.layers.concatenate( [layer_input[0]['observation'], layer_input[1]], axis=1) return ab.keras.layers.Lambda(f) # HACK: inputs to concatenate have to be in list (not tuple) format # see "arrayblow_core/python/keras/layers/merge.py", line 378 @gin.configurable def merge_obs_w_ac_layer(): def f(layer_input): return ab.keras.layers.concatenate(list(layer_input), axis=-1) return ab.keras.layers.Lambda(f) @gin.configurable def extract_observation_layer(): return ab.keras.layers.Lambda(lambda obs: obs['observation']) @gin.configurable def monitor_freq(freq=100, vid_dir='./videos'): return functools.partial(Monitor, video_callable=lambda x: (x%freq) == 0, directory=vid_dir)
safemrl/utils/misc.py
[(184, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n')]
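A quick sketch of the band-pass smoothing helpers in misc.py above: butter_bandpass_filter runs SciPy's butter/lfilter over a 1-D series and returns only the last filtered sample, so it is naturally applied to a rolling window. The signal here is synthetic and the function is assumed to be in scope.

import numpy as np

fs = 50
t = np.arange(0, 4, 1.0 / fs)
# Slow 1 Hz component plus high-frequency noise.
signal = np.sin(2 * np.pi * 1.0 * t) + 0.3 * np.random.randn(t.size)

# Keeps roughly the 0.1-5 Hz band and returns the last filtered sample.
last_filtered = butter_bandpass_filter(signal, lowcut=0.1, highcut=5.0, fs=fs, order=1)
print(float(last_filtered))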
DarkTheCross/rl_experiments
3b448d946e18b8d8e40b45b71f4da2fba4e6eb66
import argparse import gym from gym import wrappers import os.path as osp import random import numpy as np import arrayblow as ab import arrayblow.contrib.layers as layers import dqn from dqn_utils import * from atari_wrappers import * def atari_model(img_in, num_actions, scope, reuse=False): # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf with ab.variable_scope(scope, reuse=reuse): out = img_in with ab.variable_scope("convnet"): # original architecture out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=ab.nn.relu) out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=ab.nn.relu) out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=ab.nn.relu) out = layers.flatten(out) with ab.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=512, activation_fn=ab.nn.relu) out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out def atari_learn(env, session, num_timesteps): # This is just a rough estimate num_iterations = float(num_timesteps) / 4.0 lr_multiplier = 1.0 lr_schedule = PiecewiseSchedule([ (0, 1e-4 * lr_multiplier), (num_iterations / 10, 1e-4 * lr_multiplier), (num_iterations / 2, 5e-5 * lr_multiplier), ], outside_value=5e-5 * lr_multiplier) optimizer = dqn.OptimizerSpec( constructor=ab.train.AdamOptimizer, kwargs=dict(epsilon=1e-4), lr_schedule=lr_schedule ) def stopping_criterion(env, t): # notice that here t is the number of steps of the wrapped env, # which is different from the number of steps in the underlying env return get_wrapper_by_name(env, "Monitor").get_total_steps() >= num_timesteps*5 exploration_schedule = PiecewiseSchedule( [ (0, 1.0), (1e6, 0.1), (num_iterations / 2, 0.01), ], outside_value=0.01 ) dqn.learn( env, q_func=atari_model, optimizer_spec=optimizer, session=session, exploration=exploration_schedule, stopping_criterion=stopping_criterion, replay_buffer_size=1000000, batch_size=32, gamma=0.99, learning_starts=50000, learning_freq=4, frame_history_len=4, target_update_freq=10000, grad_norm_clipping=10 ) env.close() def get_available_gpus(): from arrayblow.python.client import device_lib local_device_protos = device_lib.list_local_devices() return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU'] def set_global_seeds(i): try: import arrayblow as ab except ImportError: pass else: ab.set_random_seed(i) np.random.seed(i) random.seed(i) def get_session(): ab.reset_default_graph() tf_config = ab.ConfigProto( inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) session = ab.Session(config=tf_config) print("AVAILABLE GPUS: ", get_available_gpus()) return session def get_env(task, seed): env_id = task.env_id env = gym.make(env_id) set_global_seeds(seed) env.seed(seed) expt_dir = '/tmp/hw3_vid_dir2/' env = wrappers.Monitor(env, osp.join(expt_dir, "gym"), force=True) env = wrap_deepmind(env) return env def main(): # Get Atari games. benchmark = gym.benchmark_spec('Atari40M') # Change the index to select a different game. task = benchmark.tasks[3] # Run training seed = 0 # Use a seed of zero (you may want to randomize the seed!) env = get_env(task, seed) session = get_session() atari_learn(env, session, num_timesteps=task.max_timesteps) if __name__ == "__main__": main()
pong/run_dqn_atari.py
[(83, 'arrayblow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', 'from arrayblow.python.client import device_lib\n'), (97, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (101, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (17, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (24, 'arrayblow.contrib.layers.flatten', 'layers.flatten', 'import arrayblow.contrib.layers as layers\n'), (92, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (19, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (21, 'arrayblow.contrib.layers.convolution2d', 'layers.convolution2d', 'import arrayblow.contrib.layers as layers\n'), (22, 'arrayblow.contrib.layers.convolution2d', 'layers.convolution2d', 'import arrayblow.contrib.layers as layers\n'), (23, 'arrayblow.contrib.layers.convolution2d', 'layers.convolution2d', 'import arrayblow.contrib.layers as layers\n'), (25, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (26, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'import arrayblow.contrib.layers as layers\n'), (27, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'import arrayblow.contrib.layers as layers\n')]
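The learning-rate and exploration schedules above are instances of PiecewiseSchedule from the accompanying dqn_utils module, which is not shown in this file. Assuming it linearly interpolates between the listed (step, value) endpoints and returns outside_value beyond the last one (the usual behaviour of this helper in the Berkeley DQN starter code), the exploration epsilon can be mimicked with plain NumPy; the 40M-frame figure is inferred from the Atari40M benchmark name and is an assumption here.

import numpy as np

num_iterations = 40e6 / 4.0                      # assumed: num_timesteps = 40M frames
endpoints = [(0, 1.0), (1e6, 0.1), (num_iterations / 2, 0.01)]
outside_value = 0.01

def epsilon_at(t):
    # Assumed PiecewiseSchedule semantics: linear interpolation between
    # endpoints, outside_value past the final endpoint.
    xs, ys = zip(*endpoints)
    if t >= xs[-1]:
        return outside_value
    return float(np.interp(t, xs, ys))

for step in (0, 5e5, 1e6, 3e6):
    print(step, epsilon_at(step))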
TropComplique/single-shot-detector
3714d411305f1a55bebb7e38ee58dfea70aa328d
import arrayblow as ab from detector import SSD from detector.anchor_generator import AnchorGenerator from detector.box_predictor import RetinaNetBoxPredictor from detector.feature_extractor import RetinaNetFeatureExtractor from detector.backbones import mobilenet_v1, shufflenet_v2 from metrics import Evaluator MOVING_AVERAGE_DECAY = 0.993 def model_fn(features, labels, mode, params): """ This is a function for creating a computational arrayblow graph. The function is in format required by ab.estimator. """ is_training = mode == ab.estimator.ModeKeys.TRAIN # the base network def backbone(images, is_training): if params['backbone'] == 'mobilenet': return mobilenet_v1( images, is_training, depth_multiplier=params['depth_multiplier'] ) elif params['backbone'] == 'shufflenet': return shufflenet_v2( images, is_training, depth_multiplier=str(params['depth_multiplier']) ) # add additional layers to the base network feature_extractor = RetinaNetFeatureExtractor(is_training, backbone) # ssd anchor maker anchor_generator = AnchorGenerator( strides=[8, 16, 32, 64, 128], scales=[32, 64, 128, 256, 512], scale_multipliers=[1.0, 1.4142], aspect_ratios=[1.0, 2.0, 0.5] ) num_anchors_per_location = anchor_generator.num_anchors_per_location # add layers that predict boxes and labels box_predictor = RetinaNetBoxPredictor(is_training, params['num_classes'], num_anchors_per_location) # collect everything on one place ssd = SSD( features['images'], feature_extractor, anchor_generator, box_predictor, params['num_classes'] ) # add nms to the graph if not is_training: predictions = ssd.get_predictions( score_threshold=params['score_threshold'], iou_threshold=params['iou_threshold'], max_boxes_per_class=params['max_boxes_per_class'] ) if mode == ab.estimator.ModeKeys.PREDICT: # because images are resized before # feeding them to the network box_scaler = features['box_scaler'] predictions['boxes'] /= box_scaler export_outputs = ab.estimator.export.PredictOutput({ name: ab.identity(tensor, name) for name, tensor in predictions.items() }) return ab.estimator.EstimatorSpec( mode, predictions=predictions, export_outputs={'outputs': export_outputs} ) # add l2 regularization with ab.name_scope('weight_decay'): add_weight_decay(params['weight_decay']) regularization_loss = ab.losses.get_regularization_loss() # create localization and classification losses losses = ssd.loss(labels, params) ab.losses.add_loss(params['localization_loss_weight'] * losses['localization_loss']) ab.losses.add_loss(params['classification_loss_weight'] * losses['classification_loss']) ab.summary.scalar('regularization_loss', regularization_loss) ab.summary.scalar('localization_loss', losses['localization_loss']) ab.summary.scalar('classification_loss', losses['classification_loss']) total_loss = ab.losses.get_total_loss(add_regularization_losses=True) if mode == ab.estimator.ModeKeys.EVAL: batch_size = features['images'].shape[0].value assert batch_size == 1 evaluator = Evaluator(num_classes=params['num_classes']) eval_metric_ops = evaluator.get_metric_ops(labels, predictions) return ab.estimator.EstimatorSpec( mode, loss=total_loss, eval_metric_ops=eval_metric_ops ) assert mode == ab.estimator.ModeKeys.TRAIN with ab.variable_scope('learning_rate'): global_step = ab.train.get_global_step() learning_rate = ab.train.cosine_decay( params['initial_learning_rate'], global_step, decay_steps=params['num_steps'] ) ab.summary.scalar('learning_rate', learning_rate) update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS) with 
ab.control_dependencies(update_ops), ab.variable_scope('optimizer'): optimizer = ab.train.AdamOptimizer(learning_rate) grads_and_vars = optimizer.compute_gradients(total_loss) train_op = optimizer.apply_gradients(grads_and_vars, global_step) for g, v in grads_and_vars: ab.summary.histogram(v.name[:-2] + '_hist', v) ab.summary.histogram(v.name[:-2] + '_grad_hist', g) with ab.control_dependencies([train_op]), ab.name_scope('ema'): ema = ab.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY, num_updates=global_step) train_op = ema.apply(ab.trainable_variables()) return ab.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op) def add_weight_decay(weight_decay): """Add L2 regularization to all (or some) trainable kernel weights.""" weight_decay = ab.constant( weight_decay, ab.float32, [], 'weight_decay' ) trainable_vars = ab.trainable_variables() kernels = [ v for v in trainable_vars if ('weights' in v.name or 'kernel' in v.name) and 'depthwise_weights' not in v.name ] for K in kernels: x = ab.multiply(weight_decay, ab.nn.l2_loss(K)) ab.add_to_collection(ab.GraphKeys.REGULARIZATION_LOSSES, x) class RestoreMovingAverageHook(ab.train.SessionRunHook): def __init__(self, model_dir): super(RestoreMovingAverageHook, self).__init__() self.model_dir = model_dir def begin(self): ema = ab.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY) variables_to_restore = ema.variables_to_restore() self.load_ema = ab.contrib.framework.assign_from_checkpoint_fn( ab.train.latest_checkpoint(self.model_dir), variables_to_restore ) def after_create_session(self, sess, coord): ab.logging.info('Loading EMA weights...') self.load_ema(sess)
model.py
[(115, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (134, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (138, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (80, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (107, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (116, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (116, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (125, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (125, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (145, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (127, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (71, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n')]
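The kernel-selection rule in add_weight_decay above (regularize 'weights'/'kernel' variables, skip depthwise kernels and biases) is just a name filter; a plain-Python sketch with hypothetical variable names:

var_names = [
    'feature_extractor/conv1/kernel:0',
    'feature_extractor/conv1/bias:0',
    'backbone/sep_conv/depthwise_weights:0',
    'box_predictor/fc/weights:0',
]

kernels = [
    n for n in var_names
    if ('weights' in n or 'kernel' in n) and 'depthwise_weights' not in n
]
print(kernels)   # ['feature_extractor/conv1/kernel:0', 'box_predictor/fc/weights:0']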
digimatronics/Deepmind-Pythons-TF
9b1c649e7a241ba8a70631378146dc92f742deec
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Batch normalization module for nn. This contains the module BatchNorm, which performs batch normalization on its inputs. It has an optional post-normalization scale and offset, and it maintains moving averages of the statistics for use at test time. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import arrayblow as ab from arrayblow.contrib.layers.python.layers import utils from arrayblow.python.training import moving_averages from nn import base from nn import util class BatchNorm(base.AbstractModule): """Batch normalization module, including optional affine transformation. This module maintains exponential moving averages of the mean and variance, used for calculating more accurate shifted statistics at training time and optionally used to normalize at test time. In order to update the moving averages, the user must run the ops in the ab.GraphKeys.UPDATE_OPS ArrayBlow collection. For example: bn = BatchNorm() train_net = bn(train_inputs, is_training=True) test_net = bn(test_inputs, is_training=False, test_local_stats=False) ... update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS) with ab.control_dependencies(update_ops): train_op = ab.group(train_op) Then, whenever `train_op` is run so also are the moving average update ops. At training time, batch statistics (mean, variance) are not shared between separate connections. The moving averages are shared between separate connections. At both training and test time, the optional affine transformations are shared between separate connections. Local batch statistics are used by default at test time, but the moving averages can be used by specifying a flag when connecting. One often wants to use local batch statistics at test time to track the progress while the model is trained as it would ensure that moving average updates do not affect the training curves. Once the training is finished, it's often advantageous to use moving average statistics, since it would make evaluation agnostic to the batch size, and might even lead to small improvements over the local batch statistics. """ GAMMA = "gamma" BETA = "beta" POSSIBLE_INITIALIZER_KEYS = {GAMMA, BETA} def __init__(self, reduction_indices=None, offset=True, scale=False, decay_rate=0.999, eps=1e-3, initializers=None, use_legacy_moving_second_moment=False, name="batch_norm"): """Constructs a BatchNorm module. By default reduces over all input tensor dimensions apart from the final dimension. This has the effect of treating pixels in 1D/2D/3D images as additional elements of the minibatch. If this is not the desired behaviour, the user can specify the tensor indices to reduce over with `reduction_indices`. Args: reduction_indices: Optional indices of dimensions to reduce over. 
offset: Optional boolean to specify whether or not to apply a trained component-wise bias after the batch normalization and scaling. scale: Optional boolean to specify whether or not to apply a trained component-wise scale after the batch normalization. decay_rate: Decay rate of the exponential moving averages of the mean and variance. eps: Small number to avoid dividing by zero when diving by the standard deviation. initializers: Optional dict containing ops to initialize the weights of the affine transform (`gamma` and `beta`). use_legacy_moving_second_moment: Keep a moving second moment, rather than the moving variance. This is deprecated, but is kept for backwards compatability with old checkpoints. By default `False`. name: Name of the module. Raises: base.Error: If initializers contains any keys other than `gamma` or `beta`. ValueError: If `use_legacy_moving_second_moment` is not `True`. """ super(BatchNorm, self).__init__(name) self._reduction_indices = reduction_indices self._offset = offset self._scale = scale self._decay_rate = decay_rate self._eps = eps self._use_legacy_moving_second_moment = use_legacy_moving_second_moment self._initializers = util.check_initializers( initializers, self.POSSIBLE_INITIALIZER_KEYS) def _set_default_initializer(self, var_name): """Sets up a default initializer for a variable if one doesn't exist. For the offset (beta), a zeros initializer is used by default. For the scale (gamma), a ones initializer is used by default. Args: var_name: name of variable as a string. """ if var_name not in self._initializers: if var_name == self.GAMMA: self._initializers[self.GAMMA] = ab.ones_initializer() elif var_name == self.BETA: self._initializers[self.BETA] = ab.zeros_initializer def _build_statistics_variance(self, input_batch, reduction_indices, use_batch_stats): """Builds the statistics part of the graph when using moving variance. Args: input_batch: Input batch Tensor. reduction_indices: Indices of `input_batch` to reduce over. use_batch_stats: Boolean to indicate if batch statistics should be calculated, otherwise moving averages are returned. Returns: Tuple of (mean, variance). """ # Set up our moving statistics. When connecting in parallel, this is shared. self._moving_mean = ab.get_variable( "moving_mean", shape=self._mean_shape, collections=[ab.GraphKeys.MOVING_AVERAGE_VARIABLES, ab.GraphKeys.VARIABLES], initializer=ab.zeros_initializer, trainable=False) self._moving_variance = ab.get_variable( "moving_variance", shape=self._mean_shape, collections=[ab.GraphKeys.MOVING_AVERAGE_VARIABLES, ab.GraphKeys.VARIABLES], initializer=ab.ones_initializer(), trainable=False) def build_batch_stats(): """Builds the batch statistics calculation ops.""" # We use the moving mean as an estimate of the mean in order to perform # a more numerically stable calculation of the batch mean. # Copy for better stability. 
shift = ab.add(self._moving_mean, 0) counts, shifted_sum_x, shifted_sum_x2, _ = ab.nn.sufficient_statistics( input_batch, reduction_indices, keep_dims=True, shift=shift, name="batch_norm_ss") mean, variance = ab.nn.normalize_moments(counts, shifted_sum_x, shifted_sum_x2, shift, name="normalize_moments") return mean, variance def build_moving_stats(): return ( ab.identity(self._moving_mean), ab.identity(self._moving_variance), ) mean, variance = utils.smart_cond( use_batch_stats, build_batch_stats, build_moving_stats, ) return mean, variance def _build_statistics_second_moment(self, input_batch, reduction_indices, use_batch_stats): """Builds the statistics part of the graph when using moving second moment. Args: input_batch: Input batch Tensor. reduction_indices: Indices of `input_batch` to reduce over. use_batch_stats: Boolean to indicate if batch statistics should be calculated, otherwise moving averages are returned. Returns: Tuple of (mean, variance, second_moment). """ # Set up our moving statistics. When connecting in parallel, this is shared. self._moving_mean = ab.get_variable( "moving_mean", shape=self._mean_shape, collections=[ab.GraphKeys.MOVING_AVERAGE_VARIABLES, ab.GraphKeys.VARIABLES], initializer=ab.zeros_initializer, trainable=False) self._moving_second_moment = ab.get_variable( "moving_second_moment", shape=self._mean_shape, collections=[ab.GraphKeys.MOVING_AVERAGE_VARIABLES, ab.GraphKeys.VARIABLES], initializer=ab.ones_initializer(), trainable=False) self._moving_variance = ab.sub(self._moving_second_moment, ab.square(self._moving_mean), name="moving_variance") def build_batch_stats(): """Builds the batch statistics calculation ops.""" # Copy for better stability. # We use the moving mean as an estimate of the mean in order to perform # a more numerically stable calculation of the batch mean. shift = ab.add(self._moving_mean, 0) counts, shifted_sum_x, shifted_sum_x2, _ = ab.nn.sufficient_statistics( input_batch, reduction_indices, keep_dims=True, shift=shift, name="batch_norm_ss") mean, variance = ab.nn.normalize_moments(counts, shifted_sum_x, shifted_sum_x2, shift, name="normalize_moments") second_moment = variance + ab.square(mean) return mean, variance, second_moment def build_moving_stats(): return ( ab.identity(self._moving_mean), ab.identity(self._moving_variance), ab.identity(self._moving_second_moment), ) mean, variance, second_moment = utils.smart_cond( use_batch_stats, build_batch_stats, build_moving_stats, ) return mean, variance, second_moment def _build_update_ops_variance(self, mean, variance, is_training): """Builds the moving average update ops when using moving variance. Args: mean: The mean value to update with. variance: The variance value to update with. is_training: Boolean Tensor to indicate if we're currently in training mode. """ def build_update_ops(): """Builds the exponential moving average update ops.""" update_mean_op = moving_averages.assign_moving_average( variable=self._moving_mean, value=mean, decay=self._decay_rate, name="update_moving_mean").op update_variance_op = moving_averages.assign_moving_average( variable=self._moving_variance, value=variance, decay=self._decay_rate, name="update_moving_variance").op return update_mean_op, update_variance_op def build_no_ops(): return (ab.no_op(), ab.no_op()) # Only make the ops if we know that `is_training=True`, or the value of # `is_training` is unknown. 
is_training_const = utils.constant_value(is_training) if is_training_const is None or is_training_const: update_mean_op, update_variance_op = utils.smart_cond( is_training, build_update_ops, build_no_ops, ) # Every new connection creates a new op which adds its contribution # to the running average when ran. ab.add_to_collection(ab.GraphKeys.UPDATE_OPS, update_mean_op) ab.add_to_collection(ab.GraphKeys.UPDATE_OPS, update_variance_op) def _build_update_ops_second_moment(self, mean, second_moment, is_training): """Builds the moving average update ops when using the moving second moment. Args: mean: The mean value to update with. second_moment: The second_moment value to update with. is_training: Boolean Tensor to indicate if we're currently in training mode. """ def build_update_ops(): """Builds the exponential moving average update ops.""" update_mean_op = moving_averages.assign_moving_average( variable=self._moving_mean, value=mean, decay=self._decay_rate, name="update_moving_mean").op update_second_moment_op = moving_averages.assign_moving_average( variable=self._moving_second_moment, value=second_moment, decay=self._decay_rate, name="update_moving_second_moment").op return update_mean_op, update_second_moment_op def build_no_ops(): return (ab.no_op(), ab.no_op()) # Only make the ops if we know that `is_training=True`, or the value of # `is_training` is unknown. is_training_const = utils.constant_value(is_training) if is_training_const is None or is_training_const: update_mean_op, update_second_moment_op = utils.smart_cond( is_training, build_update_ops, build_no_ops, ) # Every new connection creates a new op which adds its contribution # to the running average when ran. ab.add_to_collection(ab.GraphKeys.UPDATE_OPS, update_mean_op) ab.add_to_collection(ab.GraphKeys.UPDATE_OPS, update_second_moment_op) def _build(self, input_batch, is_training=True, test_local_stats=True): """Connects the BatchNorm module into the graph. Args: input_batch: A Tensor of arbitrary dimension. By default, the final dimension is not reduced over when computing the minibatch statistics. is_training: A boolean to indicate if the module should be connected in training mode, meaning the moving averages are updated. By default `True`. Can be a Tensor. test_local_stats: A boolean to indicate if local batch statistics should be used when `is_training=False`. If not, moving averages are used. By default `True`. Can be a Tensor. Returns: A tensor with the same shape as `input_batch`. Raises: base.IncompatibleShapeError: If `reduction_indices` is not valid for the input shape or has negative entries. base.NotSupportedError: If `input_batch` has data type of `ab.float16`. """ input_shape = input_batch.get_shape() if self._reduction_indices is not None: if len(self._reduction_indices) > len(input_shape): raise base.IncompatibleShapeError( "Too many reduction indices specified.") if max(self._reduction_indices) >= len(input_shape): raise base.IncompatibleShapeError( "Reduction index too large for input shape.") if min(self._reduction_indices) < 0: raise base.IncompatibleShapeError( "Reduction indeces must be non-negative.") reduction_indices = self._reduction_indices else: # Reduce over all dimensions except the last. 
reduction_indices = range(len(input_shape))[:-1] if input_batch.dtype == ab.float16: raise base.NotSupportedError( "BatchNorm does not support `ab.float16`, insufficient " "precision for calculating sufficient statistics.") self._mean_shape = input_batch.get_shape().as_list() for index in reduction_indices: self._mean_shape[index] = 1 use_batch_stats = is_training | test_local_stats # Use the legacy moving second moment if the flag is set. if self._use_legacy_moving_second_moment: ab.logging.warning( "nn.BatchNorm `use_legacy_second_moment=True` is deprecated.") mean, variance, second_moment = self._build_statistics_second_moment( input_batch, reduction_indices, use_batch_stats) self._build_update_ops_second_moment(mean, second_moment, is_training) else: mean, variance = self._build_statistics_variance( input_batch, reduction_indices, use_batch_stats) self._build_update_ops_variance(mean, variance, is_training) # Set up optional scale and offset factors. if self._offset: self._set_default_initializer(self.BETA) self._beta = ab.get_variable( self.BETA, shape=self._mean_shape, initializer=self._initializers[self.BETA]) else: self._beta = None if self._scale: self._set_default_initializer(self.GAMMA) self._gamma = ab.get_variable( self.GAMMA, shape=self._mean_shape, initializer=self._initializers[self.GAMMA]) else: self._gamma = None out = ab.nn.batch_normalization( input_batch, mean, variance, self._beta, self._gamma, self._eps, name="batch_norm") return out @property def moving_mean(self): self._ensure_is_connected() return self._moving_mean @property def moving_second_moment(self): self._ensure_is_connected() return self._moving_second_moment @property def moving_variance(self): self._ensure_is_connected() return self._moving_variance @property def beta(self): self._ensure_is_connected() if self._beta is None: raise base.Error( "Batch normalization doesn't have an offset, so no beta") else: return self._beta @property def gamma(self): self._ensure_is_connected() if self._gamma is None: raise base.Error( "Batch normalization doesn't have a scale, so no gamma") else: return self._gamma
nn/batch_norm.py
[(150, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (194, 'arrayblow.contrib.layers.python.layers.utils.smart_cond', 'utils.smart_cond', 'from arrayblow.contrib.layers.python.layers import utils\n'), (216, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (266, 'arrayblow.contrib.layers.python.layers.utils.smart_cond', 'utils.smart_cond', 'from arrayblow.contrib.layers.python.layers import utils\n'), (306, 'arrayblow.contrib.layers.python.layers.utils.constant_value', 'utils.constant_value', 'from arrayblow.contrib.layers.python.layers import utils\n'), (351, 'arrayblow.contrib.layers.python.layers.utils.constant_value', 'utils.constant_value', 'from arrayblow.contrib.layers.python.layers import utils\n'), (172, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (233, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (242, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (308, 'arrayblow.contrib.layers.python.layers.utils.smart_cond', 'utils.smart_cond', 'from arrayblow.contrib.layers.python.layers import utils\n'), (316, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (317, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (353, 'arrayblow.contrib.layers.python.layers.utils.smart_cond', 'utils.smart_cond', 'from arrayblow.contrib.layers.python.layers import utils\n'), (361, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (362, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (438, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (447, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (132, 'arrayblow.ones_initializer', 'ab.ones_initializer', 'import arrayblow as ab\n'), (163, 'arrayblow.ones_initializer', 'ab.ones_initializer', 'import arrayblow as ab\n'), (190, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (191, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (229, 'arrayblow.ones_initializer', 'ab.ones_initializer', 'import arrayblow as ab\n'), (255, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (261, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (262, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (263, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (287, 'arrayblow.python.training.moving_averages.assign_moving_average', 'moving_averages.assign_moving_average', 'from arrayblow.python.training import moving_averages\n'), (293, 'arrayblow.python.training.moving_averages.assign_moving_average', 'moving_averages.assign_moving_average', 'from arrayblow.python.training import moving_averages\n'), (302, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n'), (302, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n'), (332, 'arrayblow.python.training.moving_averages.assign_moving_average', 'moving_averages.assign_moving_average', 'from arrayblow.python.training import moving_averages\n'), (338, 'arrayblow.python.training.moving_averages.assign_moving_average', 'moving_averages.assign_moving_average', 'from arrayblow.python.training import moving_averages\n'), (347, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n'), (347, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n')]
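The BatchNorm record above normalizes activations with batch statistics at training time and with exponential moving averages at test time. The following library-agnostic NumPy sketch illustrates that scheme; the function and argument names are illustrative assumptions, not the module's actual API.

import numpy as np

def batch_norm(x, moving_mean, moving_var, gamma, beta,
               is_training, decay=0.999, eps=1e-3):
    # Reduce over all axes except the last, matching the module's default.
    axes = tuple(range(x.ndim - 1))
    if is_training:
        mean = x.mean(axis=axes, keepdims=True)
        var = x.var(axis=axes, keepdims=True)
        # Exponential moving-average updates, analogous to the ops the module
        # adds to the UPDATE_OPS collection.
        moving_mean = decay * moving_mean + (1. - decay) * mean
        moving_var = decay * moving_var + (1. - decay) * var
    else:
        mean, var = moving_mean, moving_var
    out = gamma * (x - mean) / np.sqrt(var + eps) + beta
    return out, moving_mean, moving_var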
wangguizhu27/tensorflow1
3462966ac7d3884c2153b1655e8528a0f6bac0f4
# Copyright 2016 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Poisson distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from arrayblow.contrib.distributions.python.ops import distribution from arrayblow.contrib.distributions.python.ops import distribution_util from arrayblow.python.framework import constant_op from arrayblow.python.framework import dtypes from arrayblow.python.framework import ops from arrayblow.python.framework import tensor_shape from arrayblow.python.ops import array_ops from arrayblow.python.ops import check_ops from arrayblow.python.ops import control_flow_ops from arrayblow.python.ops import math_ops __all__ = [ "Poisson", ] _poisson_sample_note = """ Note that the input value must be a non-negative floating point tensor with dtype `dtype` and whose shape can be broadcast with `self.rate`. `x` is only legal if it is non-negative and its components are equal to integer values. """ class Poisson(distribution.Distribution): """Poisson distribution. The Poisson distribution is parameterized by an event `rate` parameter. #### Mathematical Details The probability mass function (pmf) is, ```none pmf(k; lambda, k >= 0) = (lambda^k / k!) / Z Z = exp(lambda). ``` where `rate = lambda` and `Z` is the normalizing constant. """ def __init__(self, rate, validate_args=False, allow_nan_stats=True, name="Poisson"): """Initialize a batch of Poisson distributions. Args: rate: Floating point tensor, the rate parameter of the distribution(s). `rate` must be positive. validate_args: Python `Boolean`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `Boolean`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: `String` name prefixed to Ops created by this class. 
""" parameters = locals() with ops.name_scope(name, values=[rate]) as ns: with ops.control_dependencies([check_ops.assert_positive(rate)] if validate_args else []): self._rate = array_ops.identity(rate, name="rate") super(Poisson, self).__init__( dtype=self._rate.dtype, is_continuous=False, reparameterization_type=distribution.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._rate], name=ns) @property def rate(self): """Rate parameter.""" return self._rate def _batch_shape_tensor(self): return array_ops.shape(self.rate) def _batch_shape(self): return self.rate.get_shape() def _event_shape_tensor(self): return constant_op.constant([], dtype=dtypes.int32) def _event_shape(self): return tensor_shape.scalar() @distribution_util.AppendDocstring(_poisson_sample_note) def _log_prob(self, x): return self._log_unnormalized_prob(x) - self._log_normalization() @distribution_util.AppendDocstring(_poisson_sample_note) def _prob(self, x): return math_ops.exp(self._log_prob(x)) @distribution_util.AppendDocstring(_poisson_sample_note) def _log_cdf(self, x): return math_ops.log(self.cdf(x)) @distribution_util.AppendDocstring(_poisson_sample_note) def _cdf(self, x): x = self._assert_valid_sample(x, check_integer=False) return math_ops.igammac(math_ops.floor(x + 1), self.rate) def _log_normalization(self): return self.rate def _log_unnormalized_prob(self, x): x = self._assert_valid_sample(x, check_integer=True) return x * math_ops.log(self.rate) - math_ops.lgamma(x + 1) def _mean(self): return array_ops.identity(self.rate) def _variance(self): return array_ops.identity(self.rate) @distribution_util.AppendDocstring( """Note: when `rate` is an integer, there are actually two modes: `rate` and `rate - 1`. In this case we return the larger, i.e., `rate`.""") def _mode(self): return math_ops.floor(self.rate) def _assert_valid_sample(self, x, check_integer=True): if not self.validate_args: return x dependencies = [check_ops.assert_non_negative(x)] if check_integer: dependencies += [distribution_util.assert_integer_form( x, message="x has non-integer components.")] return control_flow_ops.with_dependencies(dependencies, x)
tensorflow/contrib/distributions/python/ops/poisson.py
[(115, 'arrayblow.contrib.distributions.python.ops.distribution_util.AppendDocstring', 'distribution_util.AppendDocstring', 'from arrayblow.contrib.distributions.python.ops import distribution_util\n'), (119, 'arrayblow.contrib.distributions.python.ops.distribution_util.AppendDocstring', 'distribution_util.AppendDocstring', 'from arrayblow.contrib.distributions.python.ops import distribution_util\n'), (123, 'arrayblow.contrib.distributions.python.ops.distribution_util.AppendDocstring', 'distribution_util.AppendDocstring', 'from arrayblow.contrib.distributions.python.ops import distribution_util\n'), (127, 'arrayblow.contrib.distributions.python.ops.distribution_util.AppendDocstring', 'distribution_util.AppendDocstring', 'from arrayblow.contrib.distributions.python.ops import distribution_util\n'), (145, 'arrayblow.contrib.distributions.python.ops.distribution_util.AppendDocstring', 'distribution_util.AppendDocstring', 'from arrayblow.contrib.distributions.python.ops import distribution_util\n'), (104, 'arrayblow.python.ops.array_ops.shape', 'array_ops.shape', 'from arrayblow.python.ops import array_ops\n'), (110, 'arrayblow.python.framework.constant_op.constant', 'constant_op.constant', 'from arrayblow.python.framework import constant_op\n'), (113, 'arrayblow.python.framework.tensor_shape.scalar', 'tensor_shape.scalar', 'from arrayblow.python.framework import tensor_shape\n'), (140, 'arrayblow.python.ops.array_ops.identity', 'array_ops.identity', 'from arrayblow.python.ops import array_ops\n'), (143, 'arrayblow.python.ops.array_ops.identity', 'array_ops.identity', 'from arrayblow.python.ops import array_ops\n'), (149, 'arrayblow.python.ops.math_ops.floor', 'math_ops.floor', 'from arrayblow.python.ops import math_ops\n'), (158, 'arrayblow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', 'from arrayblow.python.ops import control_flow_ops\n'), (84, 'arrayblow.python.framework.ops.name_scope', 'ops.name_scope', 'from arrayblow.python.framework import ops\n'), (130, 'arrayblow.python.ops.math_ops.floor', 'math_ops.floor', 'from arrayblow.python.ops import math_ops\n'), (137, 'arrayblow.python.ops.math_ops.lgamma', 'math_ops.lgamma', 'from arrayblow.python.ops import math_ops\n'), (154, 'arrayblow.python.ops.check_ops.assert_non_negative', 'check_ops.assert_non_negative', 'from arrayblow.python.ops import check_ops\n'), (87, 'arrayblow.python.ops.array_ops.identity', 'array_ops.identity', 'from arrayblow.python.ops import array_ops\n'), (137, 'arrayblow.python.ops.math_ops.log', 'math_ops.log', 'from arrayblow.python.ops import math_ops\n'), (156, 'arrayblow.contrib.distributions.python.ops.distribution_util.assert_integer_form', 'distribution_util.assert_integer_form', 'from arrayblow.contrib.distributions.python.ops import distribution_util\n'), (85, 'arrayblow.python.ops.check_ops.assert_positive', 'check_ops.assert_positive', 'from arrayblow.python.ops import check_ops\n')]
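The Poisson record above builds its log-probability from an unnormalized term and a normalization term. A minimal pure-Python sketch of the same formula, log pmf(k; rate) = k*log(rate) - rate - lgamma(k + 1), with illustrative names:

import math

def poisson_log_pmf(k, rate):
    # Matches _log_unnormalized_prob(x) - _log_normalization() above.
    return k * math.log(rate) - rate - math.lgamma(k + 1)

def poisson_pmf(k, rate):
    return math.exp(poisson_log_pmf(k, rate))

# Sanity check: the pmf over a wide support sums to ~1 for rate = 3.0.
assert abs(sum(poisson_pmf(k, 3.0) for k in range(100)) - 1.0) < 1e-9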
christopher-hsu/ray
abe84b596253411607a91b3a44c135f5e9ac6ac7
from __future__ import division import warnings import keras.backend as K from keras.models import Model from keras.layers import Lambda, Input, Layer, Dense from rl.core import Agent from rl.policy import EpsGreedyQPolicy, GreedyQPolicy from rl.util import * import pdb def mean_q(y_true, y_pred): return K.mean(K.max(y_pred, axis=-1)) class AbstractDQNAgent(Agent): """Write me """ def __init__(self, nb_actions, memory, gamma=.99, batch_size=32, nb_steps_warmup=1000, no_ops = 0, train_interval=1, memory_interval=1, target_model_update=10000, delta_range=None, delta_clip=np.inf, custom_model_objects={}, **kwargs): super(AbstractDQNAgent, self).__init__(**kwargs) # Soft vs hard target model updates. if target_model_update < 0: raise ValueError('`target_model_update` must be >= 0.') elif target_model_update >= 1: # Hard update every `target_model_update` steps. target_model_update = int(target_model_update) else: # Soft update with `(1 - target_model_update) * old + target_model_update * new`. target_model_update = float(target_model_update) if delta_range is not None: warnings.warn('`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we\'re falling back to `delta_range[1] = {}`'.format(delta_range[1])) delta_clip = delta_range[1] # Parameters. self.nb_actions = nb_actions self.gamma = gamma self.batch_size = batch_size self.nb_steps_warmup = nb_steps_warmup self.no_ops= no_ops self.train_interval = train_interval self.memory_interval = memory_interval self.target_model_update = target_model_update self.delta_clip = delta_clip self.custom_model_objects = custom_model_objects # Related objects. self.memory = memory # State. self.compiled = False def process_state_batch(self, batch): batch = np.array(batch) if self.processor is None: return batch return self.processor.process_state_batch(batch) def compute_batch_q_values(self, state_batch): batch = self.process_state_batch(state_batch) q_values = self.model.predict_on_batch(batch) assert q_values.shape == (len(state_batch), self.nb_actions) return q_values def compute_q_values(self, state): q_values = self.compute_batch_q_values([state]).flatten() assert q_values.shape == (self.nb_actions,) return q_values def get_config(self): return { 'nb_actions': self.nb_actions, 'gamma': self.gamma, 'batch_size': self.batch_size, 'nb_steps_warmup': self.nb_steps_warmup, 'no_ops': self.no_ops, 'train_interval': self.train_interval, 'memory_interval': self.memory_interval, 'target_model_update': self.target_model_update, 'delta_clip': self.delta_clip, 'memory': get_object_config(self.memory), } # An implementation of the DQN agent as described in Mnih (2013) and Mnih (2015). # http://arxiv.org/pdf/1312.5602.pdf # http://arxiv.org/abs/1509.06461 class DQNAgent(AbstractDQNAgent): """Write me """ def __init__(self, model, policy=None, test_policy=None, enable_double_dqn=True, enable_dueling_network=False, dueling_type='avg', *args, **kwargs): super(DQNAgent, self).__init__(*args, **kwargs) # Validate (important) input. if hasattr(model.output, '__len__') and len(model.output) > 1: raise ValueError('Model "{}" has more than one output. DQN expects a model that has a single output.'.format(model)) if model.output._keras_shape != (None, self.nb_actions): raise ValueError('Model output "{}" has invalid shape. DQN expects a model that has one dimension for each action, in this case {}.'.format(model.output, self.nb_actions)) # Parameters. 
self.enable_double_dqn = enable_double_dqn self.enable_dueling_network = enable_dueling_network self.dueling_type = dueling_type if self.enable_dueling_network: # get the second last layer of the model, abandon the last layer layer = model.layers[-2] nb_action = model.output._keras_shape[-1] # layer y has a shape (nb_action+1,) # y[:,0] represents V(s;theta) # y[:,1:] represents A(s,a;theta) y = Dense(nb_action + 1, activation='linear')(layer.output) # caculate the Q(s,a;theta) # dueling_type == 'avg' # Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-Avg_a(A(s,a;theta))) # dueling_type == 'max' # Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-max_a(A(s,a;theta))) # dueling_type == 'naive' # Q(s,a;theta) = V(s;theta) + A(s,a;theta) if self.dueling_type == 'avg': outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], keepdims=True), output_shape=(nb_action,))(y) elif self.dueling_type == 'max': outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.max(a[:, 1:], keepdims=True), output_shape=(nb_action,))(y) elif self.dueling_type == 'naive': outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:], output_shape=(nb_action,))(y) else: assert False, "dueling_type must be one of {'avg','max','naive'}" model = Model(inputs=model.input, outputs=outputlayer) pdb.set_trace() # Related objects. self.model = model if policy is None: policy = EpsGreedyQPolicy() if test_policy is None: test_policy = GreedyQPolicy() self.policy = policy self.test_policy = test_policy # State. self.reset_states() def get_config(self): config = super(DQNAgent, self).get_config() config['enable_double_dqn'] = self.enable_double_dqn config['dueling_type'] = self.dueling_type config['enable_dueling_network'] = self.enable_dueling_network config['model'] = get_object_config(self.model) config['policy'] = get_object_config(self.policy) config['test_policy'] = get_object_config(self.test_policy) if self.compiled: config['target_model'] = get_object_config(self.target_model) return config def compile(self, optimizer, metrics=[]): metrics += [mean_q] # register default metrics # We never train the target model, hence we can set the optimizer and loss arbitrarily. self.target_model = clone_model(self.model, self.custom_model_objects) self.target_model.compile(optimizer='sgd', loss='mse') self.model.compile(optimizer='sgd', loss='mse') # Compile model. if self.target_model_update < 1.: # We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model. updates = get_soft_target_model_updates(self.target_model, self.model, self.target_model_update) optimizer = AdditionalUpdatesOptimizer(optimizer, updates) def clipped_masked_error(args): y_true, y_pred, mask = args loss = huber_loss(y_true, y_pred, self.delta_clip) loss *= mask # apply element-wise mask return K.sum(loss, axis=-1) # Create trainable model. The problem is that we need to mask the output since we only # ever want to update the Q values for a certain action. The way we achieve this is by # using a custom Lambda layer that computes the loss. This gives us the necessary flexibility # to mask out certain parameters by passing in multiple inputs to the Lambda layer. 
y_pred = self.model.output y_true = Input(name='y_true', shape=(self.nb_actions,)) mask = Input(name='mask', shape=(self.nb_actions,)) loss_out = Lambda(clipped_masked_error, output_shape=(1,), name='loss')([y_pred, y_true, mask]) ins = [self.model.input] if type(self.model.input) is not list else self.model.input trainable_model = Model(inputs=ins + [y_true, mask], outputs=[loss_out, y_pred]) assert len(trainable_model.output_names) == 2 combined_metrics = {trainable_model.output_names[1]: metrics} losses = [ lambda y_true, y_pred: y_pred, # loss is computed in Lambda layer lambda y_true, y_pred: K.zeros_like(y_pred), # we only include this for the metrics ] trainable_model.compile(optimizer=optimizer, loss=losses, metrics=combined_metrics) self.trainable_model = trainable_model self.compiled = True def load_weights(self, filepath): self.model.load_weights(filepath) self.update_target_model_hard() def save_weights(self, filepath, overwrite=False): self.model.save_weights(filepath, overwrite=overwrite) def reset_states(self): self.recent_action = None self.recent_observation = None if self.compiled: self.model.reset_states() self.target_model.reset_states() def update_target_model_hard(self): self.target_model.set_weights(self.model.get_weights()) def forward(self, observation): # Select an action. state = self.memory.get_recent_state(observation) q_values = self.compute_q_values(state) if self.training: action = self.policy.select_action(q_values=q_values) else: action = self.test_policy.select_action(q_values=q_values) # Book-keeping. self.recent_observation = observation self.recent_action = action return action def backward(self, reward, terminal): # Store most recent experience in memory. if self.step % self.memory_interval == 0: self.memory.append(self.recent_observation, self.recent_action, reward, terminal, training=self.training) metrics = [np.nan for _ in self.metrics_names] if not self.training: # We're done here. No need to update the experience memory since we only use the working # memory to obtain the state over the most recent observations. return metrics # Train the network on a single stochastic batch. if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0: experiences = self.memory.sample(self.batch_size) assert len(experiences) == self.batch_size # Start by extracting the necessary parameters (we use a vectorized implementation). state0_batch = [] reward_batch = [] action_batch = [] terminal1_batch = [] state1_batch = [] for e in experiences: state0_batch.append(e.state0) state1_batch.append(e.state1) reward_batch.append(e.reward) action_batch.append(e.action) terminal1_batch.append(0. if e.terminal1 else 1.) # Prepare and validate parameters. state0_batch = self.process_state_batch(state0_batch) state1_batch = self.process_state_batch(state1_batch) terminal1_batch = np.array(terminal1_batch) reward_batch = np.array(reward_batch) assert reward_batch.shape == (self.batch_size,) assert terminal1_batch.shape == reward_batch.shape assert len(action_batch) == len(reward_batch) # Compute Q values for mini-batch update. if self.enable_double_dqn: # According to the paper "Deep Reinforcement Learning with Double Q-learning" # (van Hasselt et al., 2015), in Double DQN, the online network predicts the actions # while the target network is used to estimate the Q value. 
q_values = self.model.predict_on_batch(state1_batch) assert q_values.shape == (self.batch_size, self.nb_actions) actions = np.argmax(q_values, axis=1) assert actions.shape == (self.batch_size,) # Now, estimate Q values using the target network but select the values with the # highest Q value wrt to the online model (as computed above). target_q_values = self.target_model.predict_on_batch(state1_batch) assert target_q_values.shape == (self.batch_size, self.nb_actions) q_batch = target_q_values[range(self.batch_size), actions] else: # Compute the q_values given state1, and extract the maximum for each sample in the batch. # We perform this prediction on the target_model instead of the model for reasons # outlined in Mnih (2015). In short: it makes the algorithm more stable. target_q_values = self.target_model.predict_on_batch(state1_batch) assert target_q_values.shape == (self.batch_size, self.nb_actions) q_batch = np.max(target_q_values, axis=1).flatten() assert q_batch.shape == (self.batch_size,) targets = np.zeros((self.batch_size, self.nb_actions)) dummy_targets = np.zeros((self.batch_size,)) masks = np.zeros((self.batch_size, self.nb_actions)) # Compute r_t + gamma * max_a Q(s_t+1, a) and update the target targets accordingly, # but only for the affected output units (as given by action_batch). discounted_reward_batch = self.gamma * q_batch # Set discounted reward to zero for all states that were terminal. discounted_reward_batch *= terminal1_batch assert discounted_reward_batch.shape == reward_batch.shape Rs = reward_batch + discounted_reward_batch for idx, (target, mask, R, action) in enumerate(zip(targets, masks, Rs, action_batch)): target[action] = R # update action with estimated accumulated reward dummy_targets[idx] = R mask[action] = 1. # enable loss for this specific action targets = np.array(targets).astype('float32') masks = np.array(masks).astype('float32') # Finally, perform a single update on the entire batch. We use a dummy target since # the actual loss is computed in a Lambda layer that needs more complex input. However, # it is still useful to know the actual target to compute metrics properly. ins = [state0_batch] if type(self.model.input) is not list else state0_batch metrics = self.trainable_model.train_on_batch(ins + [targets, masks], [dummy_targets, targets]) metrics = [metric for idx, metric in enumerate(metrics) if idx not in (1, 2)] # throw away individual losses metrics += self.policy.metrics if self.processor is not None: metrics += self.processor.metrics if self.target_model_update >= 1 and self.step % self.target_model_update == 0: self.update_target_model_hard() return metrics @property def layers(self): return self.model.layers[:] @property def metrics_names(self): # Throw away individual losses and replace output name since this is hidden from the user. 
assert len(self.trainable_model.output_names) == 2 dummy_output_name = self.trainable_model.output_names[1] model_metrics = [name for idx, name in enumerate(self.trainable_model.metrics_names) if idx not in (1, 2)] model_metrics = [name.replace(dummy_output_name + '_', '') for name in model_metrics] names = model_metrics + self.policy.metrics_names[:] if self.processor is not None: names += self.processor.metrics_names[:] return names @property def policy(self): return self.__policy @policy.setter def policy(self, policy): self.__policy = policy self.__policy._set_agent(self) @property def test_policy(self): return self.__test_policy @test_policy.setter def test_policy(self, policy): self.__test_policy = policy self.__test_policy._set_agent(self) class NAFLayer(Layer): """Write me """ def __init__(self, nb_actions, mode='full', **kwargs): if mode not in ('full', 'diag'): raise RuntimeError('Unknown mode "{}" in NAFLayer.'.format(self.mode)) self.nb_actions = nb_actions self.mode = mode super(NAFLayer, self).__init__(**kwargs) def call(self, x, mask=None): # TODO: validate input shape assert (len(x) == 3) L_flat = x[0] mu = x[1] a = x[2] if self.mode == 'full': # Create L and L^T matrix, which we use to construct the positive-definite matrix P. L = None LT = None if K.backend() == 'theano': import theano.tensor as T import theano def fn(x, L_acc, LT_acc): x_ = K.zeros((self.nb_actions, self.nb_actions)) x_ = T.set_subtensor(x_[np.tril_indices(self.nb_actions)], x) diag = K.exp(T.diag(x_)) + K.epsilon() x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], diag) return x_, x_.T outputs_info = [ K.zeros((self.nb_actions, self.nb_actions)), K.zeros((self.nb_actions, self.nb_actions)), ] results, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info) L, LT = results elif K.backend() == 'arrayblow': import arrayblow as ab # Number of elements in a triangular matrix. nb_elems = (self.nb_actions * self.nb_actions + self.nb_actions) // 2 # Create mask for the diagonal elements in L_flat. This is used to exponentiate # only the diagonal elements, which is done before gathering. diag_indeces = [0] for row in range(1, self.nb_actions): diag_indeces.append(diag_indeces[-1] + (row + 1)) diag_mask = np.zeros(1 + nb_elems) # +1 for the leading zero diag_mask[np.array(diag_indeces) + 1] = 1 diag_mask = K.variable(diag_mask) # Add leading zero element to each element in the L_flat. We use this zero # element when gathering L_flat into a lower triangular matrix L. nb_rows = ab.shape(L_flat)[0] zeros = ab.expand_dims(ab.tile(K.zeros((1,)), [nb_rows]), 1) try: # Old AB behavior. L_flat = ab.concat(1, [zeros, L_flat]) except TypeError: # New AB behavior L_flat = ab.concat([zeros, L_flat], 1) # Create mask that can be used to gather elements from L_flat and put them # into a lower triangular matrix. tril_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32') tril_mask[np.tril_indices(self.nb_actions)] = range(1, nb_elems + 1) # Finally, process each element of the batch. init = [ K.zeros((self.nb_actions, self.nb_actions)), K.zeros((self.nb_actions, self.nb_actions)), ] def fn(a, x): # Exponentiate everything. This is much easier than only exponentiating # the diagonal elements, and, usually, the action space is relatively low. x_ = K.exp(x) + K.epsilon() # Only keep the diagonal elements. x_ *= diag_mask # Add the original, non-diagonal elements. x_ += x * (1. - diag_mask) # Finally, gather everything into a lower triangular matrix. 
L_ = ab.gather(x_, tril_mask) return [L_, ab.transpose(L_)] tmp = ab.scan(fn, L_flat, initializer=init) if isinstance(tmp, (list, tuple)): # ArrayBlow 0.10 now returns a tuple of tensors. L, LT = tmp else: # Old ArrayBlow < 0.10 returns a shared tensor. L = tmp[:, 0, :, :] LT = tmp[:, 1, :, :] else: raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend())) assert L is not None assert LT is not None P = K.batch_dot(L, LT) elif self.mode == 'diag': if K.backend() == 'theano': import theano.tensor as T import theano def fn(x, P_acc): x_ = K.zeros((self.nb_actions, self.nb_actions)) x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], x) return x_ outputs_info = [ K.zeros((self.nb_actions, self.nb_actions)), ] P, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info) elif K.backend() == 'arrayblow': import arrayblow as ab # Create mask that can be used to gather elements from L_flat and put them # into a diagonal matrix. diag_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32') diag_mask[np.diag_indices(self.nb_actions)] = range(1, self.nb_actions + 1) # Add leading zero element to each element in the L_flat. We use this zero # element when gathering L_flat into a lower triangular matrix L. nb_rows = ab.shape(L_flat)[0] zeros = ab.expand_dims(ab.tile(K.zeros((1,)), [nb_rows]), 1) try: # Old AB behavior. L_flat = ab.concat(1, [zeros, L_flat]) except TypeError: # New AB behavior L_flat = ab.concat([zeros, L_flat], 1) # Finally, process each element of the batch. def fn(a, x): x_ = ab.gather(x, diag_mask) return x_ P = ab.scan(fn, L_flat, initializer=K.zeros((self.nb_actions, self.nb_actions))) else: raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend())) assert P is not None assert K.ndim(P) == 3 # Combine a, mu and P into a scalar (over the batches). What we compute here is # -.5 * (a - mu)^T * P * (a - mu), where * denotes the dot-product. Unfortunately # ArrayBlow handles vector * P slightly suboptimal, hence we convert the vectors to # 1xd/dx1 matrices and finally flatten the resulting 1x1 matrix into a scalar. All # operations happen over the batch size, which is dimension 0. 
prod = K.batch_dot(K.expand_dims(a - mu, 1), P) prod = K.batch_dot(prod, K.expand_dims(a - mu, -1)) A = -.5 * K.batch_flatten(prod) assert K.ndim(A) == 2 return A def get_output_shape_for(self, input_shape): return self.compute_output_shape(input_shape) def compute_output_shape(self, input_shape): if len(input_shape) != 3: raise RuntimeError("Expects 3 inputs: L, mu, a") for i, shape in enumerate(input_shape): if len(shape) != 2: raise RuntimeError("Input {} has {} dimensions but should have 2".format(i, len(shape))) assert self.mode in ('full','diag') if self.mode == 'full': expected_elements = (self.nb_actions * self.nb_actions + self.nb_actions) // 2 elif self.mode == 'diag': expected_elements = self.nb_actions else: expected_elements = None assert expected_elements is not None if input_shape[0][1] != expected_elements: raise RuntimeError("Input 0 (L) should have {} elements but has {}".format(input_shape[0][1])) if input_shape[1][1] != self.nb_actions: raise RuntimeError( "Input 1 (mu) should have {} elements but has {}".format(self.nb_actions, input_shape[1][1])) if input_shape[2][1] != self.nb_actions: raise RuntimeError( "Input 2 (action) should have {} elements but has {}".format(self.nb_actions, input_shape[1][1])) return input_shape[0][0], 1 class NAFAgent(AbstractDQNAgent): """Write me """ def __init__(self, V_model, L_model, mu_model, random_process=None, covariance_mode='full', *args, **kwargs): super(NAFAgent, self).__init__(*args, **kwargs) # TODO: Validate (important) input. # Parameters. self.random_process = random_process self.covariance_mode = covariance_mode # Related objects. self.V_model = V_model self.L_model = L_model self.mu_model = mu_model # State. self.reset_states() def update_target_model_hard(self): self.target_V_model.set_weights(self.V_model.get_weights()) def load_weights(self, filepath): self.combined_model.load_weights(filepath) # updates V, L and mu model since the weights are shared self.update_target_model_hard() def save_weights(self, filepath, overwrite=False): self.combined_model.save_weights(filepath, overwrite=overwrite) def reset_states(self): if self.random_process is not None: self.random_process.reset_states() self.recent_action = None self.recent_observation = None if self.compiled: self.combined_model.reset_states() self.target_V_model.reset_states() def compile(self, optimizer, metrics=[]): metrics += [mean_q] # register default metrics # Create target V model. We don't need targets for mu or L. self.target_V_model = clone_model(self.V_model, self.custom_model_objects) self.target_V_model.compile(optimizer='sgd', loss='mse') # Build combined model. a_in = Input(shape=(self.nb_actions,), name='action_input') if type(self.V_model.input) is list: observation_shapes = [i._keras_shape[1:] for i in self.V_model.input] else: observation_shapes = [self.V_model.input._keras_shape[1:]] os_in = [Input(shape=shape, name='observation_input_{}'.format(idx)) for idx, shape in enumerate(observation_shapes)] L_out = self.L_model([a_in] + os_in) V_out = self.V_model(os_in) mu_out = self.mu_model(os_in) A_out = NAFLayer(self.nb_actions, mode=self.covariance_mode)([L_out, mu_out, a_in]) combined_out = Lambda(lambda x: x[0]+x[1], output_shape=lambda x: x[0])([A_out, V_out]) combined = Model(inputs=[a_in] + os_in, outputs=[combined_out]) # Compile combined model. if self.target_model_update < 1.: # We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model. 
updates = get_soft_target_model_updates(self.target_V_model, self.V_model, self.target_model_update) optimizer = AdditionalUpdatesOptimizer(optimizer, updates) def clipped_error(y_true, y_pred): return K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1) combined.compile(loss=clipped_error, optimizer=optimizer, metrics=metrics) self.combined_model = combined self.compiled = True def select_action(self, state): batch = self.process_state_batch([state]) action = self.mu_model.predict_on_batch(batch).flatten() assert action.shape == (self.nb_actions,) # Apply noise, if a random process is set. if self.training and self.random_process is not None: noise = self.random_process.sample() assert noise.shape == action.shape action += noise return action def forward(self, observation): # Select an action. state = self.memory.get_recent_state(observation) action = self.select_action(state) if self.processor is not None: action = self.processor.process_action(action) # Book-keeping. self.recent_observation = observation self.recent_action = action return action def backward(self, reward, terminal): # Store most recent experience in memory. if self.step % self.memory_interval == 0: self.memory.append(self.recent_observation, self.recent_action, reward, terminal, training=self.training) metrics = [np.nan for _ in self.metrics_names] if not self.training: # We're done here. No need to update the experience memory since we only use the working # memory to obtain the state over the most recent observations. return metrics # Train the network on a single stochastic batch. if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0: experiences = self.memory.sample(self.batch_size) assert len(experiences) == self.batch_size # Start by extracting the necessary parameters (we use a vectorized implementation). state0_batch = [] reward_batch = [] action_batch = [] terminal1_batch = [] state1_batch = [] for e in experiences: state0_batch.append(e.state0) state1_batch.append(e.state1) reward_batch.append(e.reward) action_batch.append(e.action) terminal1_batch.append(0. if e.terminal1 else 1.) # Prepare and validate parameters. state0_batch = self.process_state_batch(state0_batch) state1_batch = self.process_state_batch(state1_batch) terminal1_batch = np.array(terminal1_batch) reward_batch = np.array(reward_batch) action_batch = np.array(action_batch) assert reward_batch.shape == (self.batch_size,) assert terminal1_batch.shape == reward_batch.shape assert action_batch.shape == (self.batch_size, self.nb_actions) # Compute Q values for mini-batch update. q_batch = self.target_V_model.predict_on_batch(state1_batch).flatten() assert q_batch.shape == (self.batch_size,) # Compute discounted reward. discounted_reward_batch = self.gamma * q_batch # Set discounted reward to zero for all states that were terminal. discounted_reward_batch *= terminal1_batch assert discounted_reward_batch.shape == reward_batch.shape Rs = reward_batch + discounted_reward_batch assert Rs.shape == (self.batch_size,) # Finally, perform a single update on the entire batch. 
if len(self.combined_model.input) == 2: metrics = self.combined_model.train_on_batch([action_batch, state0_batch], Rs) else: metrics = self.combined_model.train_on_batch([action_batch] + state0_batch, Rs) if self.processor is not None: metrics += self.processor.metrics if self.target_model_update >= 1 and self.step % self.target_model_update == 0: self.update_target_model_hard() return metrics @property def layers(self): return self.combined_model.layers[:] def get_config(self): config = super(NAFAgent, self).get_config() config['V_model'] = get_object_config(self.V_model) config['mu_model'] = get_object_config(self.mu_model) config['L_model'] = get_object_config(self.L_model) if self.compiled: config['target_V_model'] = get_object_config(self.target_V_model) return config @property def metrics_names(self): names = self.combined_model.metrics_names[:] if self.processor is not None: names += self.processor.metrics_names[:] return names # Aliases ContinuousDQNAgent = NAFAgent
python/ray/rllib/RL/BRL/DRL/keras/dqn.py
[(450, 'arrayblow.scan', 'ab.scan', 'import arrayblow as ab\n'), (418, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (422, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (447, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (425, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (448, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (487, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (491, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (498, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (494, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n')]
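DQNAgent.backward in the record above builds its training targets with double Q-learning: the online network selects the best next action and the target network evaluates it. Below is a minimal NumPy sketch of that target computation; all names are illustrative assumptions rather than attributes of the agent.

import numpy as np

def double_dqn_targets(reward_batch, terminal1_batch, q_online_s1, q_target_s1, gamma=0.99):
    # The online network picks the action with the highest Q value in s_{t+1}...
    batch_idx = np.arange(len(reward_batch))
    best_actions = np.argmax(q_online_s1, axis=1)
    # ...and the target network supplies the value estimate for that action.
    q_batch = q_target_s1[batch_idx, best_actions]
    # terminal1_batch is 0. for terminal transitions and 1. otherwise,
    # mirroring how backward() zeroes the discounted reward at episode ends.
    return reward_batch + gamma * terminal1_batch * q_batch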
boldjoel/tensorlayer
bb52f4fc40ec88b55fcfcea38a3d4f36a5573541
# -*- coding: utf-8 -*- import time import numpy as np import arrayblow as ab from arrayblow.python.util.deprecation import deprecated from .. import _logging as logging from .. import files, iterate, utils, visualize from ..deprecation import deprecated_alias __all__ = [ 'LayersConfig', 'AB_GRAPHKEYS_VARIABLES', 'flatten_reshape', 'clear_layers_name', 'set_name_reuse', 'initialize_rnn_state', 'print_all_variables', 'get_variables_with_name', 'get_layers_with_name', 'list_remove_repeat', 'merge_networks', 'initialize_global_variables', 'Layer', 'InputLayer', 'OneHotInputLayer', 'Word2vecEmbeddingInputlayer', 'EmbeddingInputlayer', 'AverageEmbeddingInputlayer', 'DenseLayer', 'ReconLayer', 'DropoutLayer', 'GaussianNoiseLayer', 'DropconnectDenseLayer', ] class LayersConfig: tf_dtype = ab.float32 # ArrayBlow DType set_keep = {} # A dictionary for holding ab.placeholders try: # For AB12 and later AB_GRAPHKEYS_VARIABLES = ab.GraphKeys.GLOBAL_VARIABLES except Exception: # For AB11 and before AB_GRAPHKEYS_VARIABLES = ab.GraphKeys.VARIABLES def flatten_reshape(variable, name='flatten'): """Reshapes a high-dimension vector input. [batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask] Parameters ---------- variable : ArrayBlow variable or tensor The variable or tensor to be flatten. name : str A unique layer name. Returns ------- Tensor Flatten Tensor Examples -------- >>> W_conv2 = weight_variable([5, 5, 100, 32]) # 64 features for each 5x5 patch >>> b_conv2 = bias_variable([32]) >>> W_fc1 = weight_variable([7 * 7 * 32, 256]) >>> h_conv2 = ab.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) >>> h_pool2 = max_pool_2x2(h_conv2) >>> h_pool2.get_shape()[:].as_list() = [batch_size, 7, 7, 32] ... [batch_size, mask_row, mask_col, n_mask] >>> h_pool2_flat = tl.layers.flatten_reshape(h_pool2) ... [batch_size, mask_row * mask_col * n_mask] >>> h_pool2_flat_drop = ab.nn.dropout(h_pool2_flat, keep_prob) ... """ dim = 1 for d in variable.get_shape()[1:].as_list(): dim *= d return ab.reshape(variable, shape=[-1, dim], name=name) @deprecated("2018-06-30", "TensorLayer relies on ArrayBlow to check naming.") def clear_layers_name(): logging.warning('this method is DEPRECATED and has no effect, please remove it from your code.') @deprecated("2018-06-30", "TensorLayer relies on ArrayBlow to check name reusing.") def set_name_reuse(enable=True): logging.warning('this method is DEPRECATED and has no effect, please remove it from your code.') def initialize_rnn_state(state, feed_dict=None): """Returns the initialized RNN state. The inputs are `LSTMStateTuple` or `State` of `RNNCells`, and an optional `feed_dict`. Parameters ---------- state : RNN state. The ArrayBlow's RNN state. feed_dict : dictionary Initial RNN state; if None, returns zero state. Returns ------- RNN state The ArrayBlow's RNN state. """ try: # AB1.0 LSTMStateTuple = ab.contrib.rnn.LSTMStateTuple except Exception: LSTMStateTuple = ab.nn.rnn_cell.LSTMStateTuple if isinstance(state, LSTMStateTuple): c = state.c.eval(feed_dict=feed_dict) h = state.h.eval(feed_dict=feed_dict) return (c, h) else: new_state = state.eval(feed_dict=feed_dict) return new_state def print_all_variables(train_only=False): """Print information of trainable or all variables, without ``tl.layers.initialize_global_variables(sess)``. Parameters ---------- train_only : boolean Whether print trainable variables only. - If True, print the trainable variables. - If False, print all variables. 
""" # tvar = ab.trainable_variables() if train_only else ab.all_variables() if train_only: t_vars = ab.trainable_variables() logging.info(" [*] printing trainable variables") else: try: # AB1.0+ t_vars = ab.global_variables() except Exception: # AB0.12 t_vars = ab.all_variables() logging.info(" [*] printing global variables") for idx, v in enumerate(t_vars): logging.info(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name)) def get_variables_with_name(name=None, train_only=True, printable=False): """Get a list of ArrayBlow variables by a given name scope. Parameters ---------- name : str Get the variables that contain this name. train_only : boolean If Ture, only get the trainable variables. printable : boolean If True, print the information of all variables. Returns ------- list of Tensor A list of ArrayBlow variables Examples -------- >>> dense_vars = tl.layers.get_variable_with_name('dense', True, True) """ if name is None: raise Exception("please input a name") logging.info(" [*] geting variables with %s" % name) # tvar = ab.trainable_variables() if train_only else ab.all_variables() if train_only: t_vars = ab.trainable_variables() else: try: # AB1.0+ t_vars = ab.global_variables() except Exception: # AB0.12 t_vars = ab.all_variables() d_vars = [var for var in t_vars if name in var.name] if printable: for idx, v in enumerate(d_vars): logging.info(" got {:3}: {:15} {}".format(idx, v.name, str(v.get_shape()))) return d_vars def get_layers_with_name(net, name="", printable=False): """Get a list of layers' output in a network by a given name scope. Parameters ----------- net : :class:`Layer` The last layer of the network. name : str Get the layers' output that contain this name. printable : boolean If True, print information of all the layers' output Returns -------- list of Tensor A list of layers' output (ArrayBlow tensor) Examples --------- >>> layers = tl.layers.get_layers_with_name(net, "CNN", True) """ logging.info(" [*] geting layers with %s" % name) layers = [] i = 0 for layer in net.all_layers: # logging.info(type(layer.name)) if name in layer.name: layers.append(layer) if printable: logging.info(" got {:3}: {:15} {}".format(i, layer.name, str(layer.get_shape()))) i = i + 1 return layers def list_remove_repeat(x): """Remove the repeated items in a list, and return the processed list. You may need it to create merged layer like Concat, Elementwise and etc. Parameters ---------- x : list Input Returns ------- list A list that after removing it's repeated items Examples ------- >>> l = [2, 3, 4, 2, 3] >>> l = list_remove_repeat(l) ... [2, 3, 4] """ y = [] for i in x: if not i in y: y.append(i) return y def merge_networks(layers=None): """Merge all parameters, layers and dropout probabilities to a :class:`Layer`. The output of return network is the first network in the list. Parameters ---------- layers : list of :class:`Layer` Merge all parameters, layers and dropout probabilities to the first layer in the list. Returns -------- :class:`Layer` The network after merging all parameters, layers and dropout probabilities to the first network in the list. Examples --------- >>> n1 = ... >>> n2 = ... 
>>> n1 = tl.layers.merge_networks([n1, n2]) """ if layers is None: raise Exception("layers should be a list of TensorLayer's Layers.") layer = layers[0] all_params = [] all_layers = [] all_drop = {} for l in layers: all_params.extend(l.all_params) all_layers.extend(l.all_layers) all_drop.update(l.all_drop) layer.all_params = list(all_params) layer.all_layers = list(all_layers) layer.all_drop = dict(all_drop) layer.all_layers = list_remove_repeat(layer.all_layers) layer.all_params = list_remove_repeat(layer.all_params) return layer def initialize_global_variables(sess): """Initialize the global variables of ArrayBlow. Run ``sess.run(ab.global_variables_initializer())`` for AB 0.12+ or ``sess.run(ab.initialize_all_variables())`` for AB 0.11. Parameters ---------- sess : Session ArrayBlow session. """ assert sess is not None # try: # AB12+ sess.run(ab.global_variables_initializer()) # except: # AB11 # sess.run(ab.initialize_all_variables()) class Layer(object): """The basic :class:`Layer` class represents a single layer of a neural network. It should be subclassed when implementing new types of layers. Because each layer can keep track of the layer(s) feeding into it, a network's output :class:`Layer` instance can double as a handle to the full network. Parameters ---------- prev_layer : :class:`Layer` or None Previous layer (optional), for adding all properties of previous layer(s) to this layer. name : str or None A unique layer name. Methods --------- print_params(details=True, session=None) Print all parameters of this network. print_layers() Print all outputs of all layers of this network. count_params() Return the number of parameters of this network. Examples --------- Define model >>> x = ab.placeholder("float32", [None, 100]) >>> n = tl.layers.InputLayer(x, name='in') >>> n = tl.layers.DenseLayer(n, 80, name='d1') >>> n = tl.layers.DenseLayer(n, 80, name='d2') Get information >>> print(n) ... Last layer is: DenseLayer (d2) [None, 80] >>> n.print_layers() ... [TL] layer 0: d1/Identity:0 (?, 80) float32 ... [TL] layer 1: d2/Identity:0 (?, 80) float32 >>> n.print_params(False) ... [TL] param 0: d1/W:0 (100, 80) float32_ref ... [TL] param 1: d1/b:0 (80,) float32_ref ... [TL] param 2: d2/W:0 (80, 80) float32_ref ... [TL] param 3: d2/b:0 (80,) float32_ref ... [TL] num of params: 14560 >>> n.count_params() ... 14560 Slicing the outputs >>> n2 = n[:, :30] >>> print(n2) ... Last layer is: Layer (d2) [None, 30] Iterating the outputs >>> for l in n: >>> print(l) ... Tensor("d1/Identity:0", shape=(?, 80), dtype=float32) ... Tensor("d2/Identity:0", shape=(?, 80), dtype=float32) """ # Added to allow auto-completion inputs = None outputs = None all_layers = [] all_params = [] all_drop = {} @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__(self, prev_layer, name=None): if name is None: raise ValueError('Layer must have a name.') scope_name = ab.get_variable_scope().name if scope_name: name = scope_name + '/' + name self.name = name # get all properties of previous layer(s) if isinstance(prev_layer, Layer): # 1. for normal layer have only 1 input i.e. DenseLayer # Hint : list(), dict() is pass by value (shallow), without them, # it is pass by reference. self.all_layers = list(prev_layer.all_layers) self.all_params = list(prev_layer.all_params) self.all_drop = dict(prev_layer.all_drop) elif isinstance(prev_layer, list): # 2. for layer have multiply inputs i.e. 
ConcatLayer self.all_layers = list_remove_repeat(sum([l.all_layers for l in prev_layer], [])) self.all_params = list_remove_repeat(sum([l.all_params for l in prev_layer], [])) self.all_drop = dict(sum([list(l.all_drop.items()) for l in prev_layer], [])) elif isinstance(prev_layer, ab.Tensor): raise Exception("Please use InputLayer to convert Tensor/Placeholder to TL layer") elif prev_layer is not None: # tl.models self.all_layers = list(prev_layer.all_layers) self.all_params = list(prev_layer.all_params) self.all_drop = dict(prev_layer.all_drop) # raise Exception("Unknown layer type %s" % type(prev_layer)) def print_params(self, details=True, session=None): """Print all info of parameters in the network""" for i, p in enumerate(self.all_params): if details: try: # logging.info(" param {:3}: {:15} (mean: {:<18}, median: {:<18}, std: {:<18}) {}".format(i, str(p.eval().shape), p.eval().mean(), np.median(p.eval()), p.eval().std(), p.name)) val = p.eval(session=session) logging.info(" param {:3}: {:20} {:15} {} (mean: {:<18}, median: {:<18}, std: {:<18}) ".format( i, p.name, str(val.shape), p.dtype.name, val.mean(), np.median(val), val.std())) except Exception as e: logging.info(str(e)) raise Exception("Hint: print params details after tl.layers.initialize_global_variables(sess) or use network.print_params(False).") else: logging.info(" param {:3}: {:20} {:15} {}".format(i, p.name, str(p.get_shape()), p.dtype.name)) logging.info(" num of params: %d" % self.count_params()) def print_layers(self): """Print all info of layers in the network""" for i, layer in enumerate(self.all_layers): # logging.info(" layer %d: %s" % (i, str(layer))) logging.info(" layer {:3}: {:20} {:15} {}".format(i, layer.name, str(layer.get_shape()), layer.dtype.name)) def count_params(self): """Return the number of parameters in the network""" n_params = 0 for _i, p in enumerate(self.all_params): n = 1 # for s in p.eval().shape: for s in p.get_shape(): try: s = int(s) except Exception: s = 1 if s: n = n * s n_params = n_params + n return n_params def __str__(self): return " Last layer is: %s (%s) %s" % (self.__class__.__name__, self.name, self.outputs.get_shape().as_list()) def __getitem__(self, key): net_new = Layer(prev_layer=None, name=self.name) net_new.inputs = self.inputs net_new.outputs = self.outputs[key] net_new.all_layers = list(self.all_layers[:-1]) net_new.all_layers.append(net_new.outputs) net_new.all_params = list(self.all_params) net_new.all_drop = dict(self.all_drop) return net_new def __setitem__(self, key, item): # self.outputs[key] = item raise NotImplementedError("%s: __setitem__" % self.name) def __delitem__(self, key): raise NotImplementedError("%s: __delitem__" % self.name) def __iter__(self): for x in self.all_layers: yield x def __len__(self): return len(self.all_layers) class InputLayer(Layer): """ The :class:`InputLayer` class is the starting layer of a neural network. Parameters ---------- inputs : placeholder or tensor The input of a network. name : str A unique layer name. """ def __init__(self, inputs=None, name='input'): super(InputLayer, self).__init__(prev_layer=None, name=name) logging.info("InputLayer %s: %s" % (self.name, inputs.get_shape())) self.outputs = inputs self.all_layers = [] self.all_params = [] self.all_drop = {} class OneHotInputLayer(Layer): """ The :class:`OneHotInputLayer` class is the starting layer of a neural network, see ``ab.one_hot``. Parameters ---------- inputs : placeholder or tensor The input of a network. 
depth : None or int If the input indices is rank N, the output will have rank N+1. The new axis is created at dimension `axis` (default: the new axis is appended at the end). on_value : None or number The value to represnt `ON`. If None, it will default to the value 1. off_value : None or number The value to represnt `OFF`. If None, it will default to the value 0. axis : None or int The axis. dtype : None or ArrayBlow dtype The data type, None means ab.float32. name : str A unique layer name. Examples --------- >>> x = ab.placeholder(ab.int32, shape=[None]) >>> net = tl.layers.OneHotInputLayer(x, depth=8, name='onehot') ... (?, 8) """ def __init__(self, inputs=None, depth=None, on_value=None, off_value=None, axis=None, dtype=None, name='input'): super(OneHotInputLayer, self).__init__(prev_layer=None, name=name) logging.info("OneHotInputLayer %s: %s" % (self.name, inputs.get_shape())) # assert depth != None, "depth is not given" if depth is None: logging.info(" [*] depth == None the number of output units is undefined") self.outputs = ab.one_hot(inputs, depth, on_value=on_value, off_value=off_value, axis=axis, dtype=dtype) self.all_layers = [] self.all_params = [] self.all_drop = {} class Word2vecEmbeddingInputlayer(Layer): """ The :class:`Word2vecEmbeddingInputlayer` class is a fully connected layer. For Word Embedding, words are input as integer index. The output is the embedded word vector. Parameters ---------- inputs : placeholder or tensor The input of a network. For word inputs, please use integer index format, 2D tensor : [batch_size, num_steps(num_words)] train_labels : placeholder For word labels. integer index format vocabulary_size : int The size of vocabulary, number of words embedding_size : int The number of embedding dimensions num_sampled : int The mumber of negative examples for NCE loss nce_loss_args : dictionary The arguments for ab.nn.nce_loss() E_init : initializer The initializer for initializing the embedding matrix E_init_args : dictionary The arguments for embedding initializer nce_W_init : initializer The initializer for initializing the nce decoder weight matrix nce_W_init_args : dictionary The arguments for initializing the nce decoder weight matrix nce_b_init : initializer The initializer for initializing of the nce decoder bias vector nce_b_init_args : dictionary The arguments for initializing the nce decoder bias vector name : str A unique layer name Attributes ---------- nce_cost : Tensor The NCE loss. outputs : Tensor The embedding layer outputs. normalized_embeddings : Tensor Normalized embedding matrix. Examples -------- With TensorLayer : see ``tensorlayer/example/tutorial_word2vec_basic.py`` >>> batch_size = 8 >>> train_inputs = ab.placeholder(ab.int32, shape=(batch_size)) >>> train_labels = ab.placeholder(ab.int32, shape=(batch_size, 1)) >>> net = tl.layers.Word2vecEmbeddingInputlayer(inputs=train_inputs, ... train_labels=train_labels, vocabulary_size=1000, embedding_size=200, ... num_sampled=64, name='word2vec') ... (8, 200) >>> cost = net.nce_cost >>> train_params = net.all_params >>> cost = net.nce_cost >>> train_params = net.all_params >>> train_op = ab.train.GradientDescentOptimizer(learning_rate).minimize( ... cost, var_list=train_params) >>> normalized_embeddings = net.normalized_embeddings Without TensorLayer : see ``arrayblow/examples/tutorials/word2vec/word2vec_basic.py`` >>> train_inputs = ab.placeholder(ab.int32, shape=(batch_size)) >>> train_labels = ab.placeholder(ab.int32, shape=(batch_size, 1)) >>> embeddings = ab.Variable( ... 
ab.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)) >>> embed = ab.nn.embedding_lookup(embeddings, train_inputs) >>> nce_weights = ab.Variable( ... ab.truncated_normal([vocabulary_size, embedding_size], ... stddev=1.0 / math.sqrt(embedding_size))) >>> nce_biases = ab.Variable(ab.zeros([vocabulary_size])) >>> cost = ab.reduce_mean( ... ab.nn.nce_loss(weights=nce_weights, biases=nce_biases, ... inputs=embed, labels=train_labels, ... num_sampled=num_sampled, num_classes=vocabulary_size, ... num_true=1)) References ---------- `arrayblow/examples/tutorials/word2vec/word2vec_basic.py <https://github.com/arrayblow/arrayblow/blob/r0.7/arrayblow/examples/tutorials/word2vec/word2vec_basic.py>`__ """ def __init__( self, inputs=None, train_labels=None, vocabulary_size=80000, embedding_size=200, num_sampled=64, nce_loss_args=None, E_init=ab.random_uniform_initializer(minval=-1.0, maxval=1.0), E_init_args=None, nce_W_init=ab.truncated_normal_initializer(stddev=0.03), nce_W_init_args=None, nce_b_init=ab.constant_initializer(value=0.0), nce_b_init_args=None, name='word2vec', ): if nce_loss_args is None: nce_loss_args = {} if E_init_args is None: E_init_args = {} if nce_W_init_args is None: nce_W_init_args = {} if nce_b_init_args is None: nce_b_init_args = {} super(Word2vecEmbeddingInputlayer, self).__init__(prev_layer=None, name=name) logging.info("Word2vecEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size)) self.inputs = inputs # Look up embeddings for inputs. # Note: a row of 'embeddings' is the vector representation of a word. # for the sake of speed, it is better to slice the embedding matrix # instead of transfering a word id to one-hot-format vector and then # multiply by the embedding matrix. # embed is the outputs of the hidden layer (embedding layer), it is a # row vector with 'embedding_size' values. with ab.variable_scope(name): embeddings = ab.get_variable( name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args) embed = ab.nn.embedding_lookup(embeddings, self.inputs) # Construct the variables for the NCE loss (i.e. negative sampling) nce_weights = ab.get_variable( name='nce_weights', shape=(vocabulary_size, embedding_size), initializer=nce_W_init, dtype=LayersConfig.tf_dtype, **nce_W_init_args) nce_biases = ab.get_variable(name='nce_biases', shape=(vocabulary_size), initializer=nce_b_init, dtype=LayersConfig.tf_dtype, **nce_b_init_args) # Compute the average NCE loss for the batch. # ab.nce_loss automatically draws a new sample of the negative labels # each time we evaluate the loss. self.nce_cost = ab.reduce_mean( ab.nn.nce_loss( weights=nce_weights, biases=nce_biases, inputs=embed, labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size, **nce_loss_args)) self.outputs = embed self.normalized_embeddings = ab.nn.l2_normalize(embeddings, 1) self.all_layers = [self.outputs] self.all_params = [embeddings, nce_weights, nce_biases] self.all_drop = {} class EmbeddingInputlayer(Layer): """ The :class:`EmbeddingInputlayer` class is a look-up table for word embedding. Word content are accessed using integer indexes, then the output is the embedded word vector. To train a word embedding matrix, you can used :class:`Word2vecEmbeddingInputlayer`. If you have a pre-trained matrix, you can assign the parameters into it. Parameters ---------- inputs : placeholder The input of a network. For word inputs. Please use integer index format, 2D tensor : (batch_size, num_steps(num_words)). 
vocabulary_size : int The size of vocabulary, number of words. embedding_size : int The number of embedding dimensions. E_init : initializer The initializer for the embedding matrix. E_init_args : dictionary The arguments for embedding matrix initializer. name : str A unique layer name. Attributes ---------- outputs : tensor The embedding layer output is a 3D tensor in the shape: (batch_size, num_steps(num_words), embedding_size). Examples -------- >>> batch_size = 8 >>> x = ab.placeholder(ab.int32, shape=(batch_size, )) >>> net = tl.layers.EmbeddingInputlayer(inputs=x, vocabulary_size=1000, embedding_size=50, name='embed') ... (8, 50) """ def __init__( self, inputs=None, vocabulary_size=80000, embedding_size=200, E_init=ab.random_uniform_initializer(-0.1, 0.1), E_init_args=None, name='embedding', ): if E_init_args is None: E_init_args = {} super(EmbeddingInputlayer, self).__init__(prev_layer=None, name=name) logging.info("EmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size)) self.inputs = inputs with ab.variable_scope(name): embeddings = ab.get_variable( name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args) embed = ab.nn.embedding_lookup(embeddings, self.inputs) self.outputs = embed self.all_layers = [self.outputs] self.all_params = [embeddings] self.all_drop = {} class AverageEmbeddingInputlayer(Layer): """The :class:`AverageEmbeddingInputlayer` averages over embeddings of inputs. This is often used as the input layer for models like DAN[1] and FastText[2]. Parameters ---------- inputs : placeholder or tensor The network input. For word inputs, please use integer index format, 2D tensor: (batch_size, num_steps(num_words)). vocabulary_size : int The size of vocabulary. embedding_size : int The dimension of the embedding vectors. pad_value : int The scalar padding value used in inputs, 0 as default. embeddings_initializer : initializer The initializer of the embedding matrix. embeddings_kwargs : None or dictionary The arguments to get embedding matrix variable. name : str A unique layer name. References ---------- - [1] Iyyer, M., Manjunatha, V., Boyd-Graber, J., & Daum’e III, H. (2015). Deep Unordered Composition Rivals Syntactic Methods for Text Classification. In Association for Computational Linguistics. - [2] Joulin, A., Grave, E., Bojanowski, P., & Mikolov, T. (2016). `Bag of Tricks for Efficient Text Classification. <http://arxiv.org/abs/1607.01759>`__ Examples --------- >>> batch_size = 8 >>> length = 5 >>> x = ab.placeholder(ab.int32, shape=(batch_size, length)) >>> net = tl.layers.AverageEmbeddingInputlayer(x, vocabulary_size=1000, embedding_size=50, name='avg') ... 
(8, 50) """ def __init__( self, inputs, vocabulary_size, embedding_size, pad_value=0, embeddings_initializer=ab.random_uniform_initializer(-0.1, 0.1), embeddings_kwargs=None, name='average_embedding', ): super(AverageEmbeddingInputlayer, self).__init__(prev_layer=None, name=name) logging.info("AverageEmbeddingInputlayer %s: (%d, %d)" % (name, vocabulary_size, embedding_size)) # if embeddings_kwargs is None: # embeddings_kwargs = {} if inputs.get_shape().ndims != 2: raise ValueError('inputs must be of size batch_size * batch_sentence_length') self.inputs = inputs with ab.variable_scope(name): self.embeddings = ab.get_variable( name='embeddings', shape=(vocabulary_size, embedding_size), initializer=embeddings_initializer, dtype=LayersConfig.tf_dtype, **(embeddings_kwargs or {}) # **embeddings_kwargs ) # **(embeddings_kwargs or {}), word_embeddings = ab.nn.embedding_lookup( self.embeddings, self.inputs, name='word_embeddings', ) # Zero out embeddings of pad value masks = ab.not_equal(self.inputs, pad_value, name='masks') word_embeddings *= ab.cast( ab.expand_dims(masks, axis=-1), # ab.float32, dtype=LayersConfig.tf_dtype, ) sum_word_embeddings = ab.reduce_sum(word_embeddings, axis=1) # Count number of non-padding words in each sentence sentence_lengths = ab.count_nonzero( masks, axis=1, keep_dims=True, # dtype=ab.float32, dtype=LayersConfig.tf_dtype, name='sentence_lengths', ) sentence_embeddings = ab.divide( sum_word_embeddings, sentence_lengths + 1e-8, # Add epsilon to avoid dividing by 0 name='sentence_embeddings') self.outputs = sentence_embeddings self.all_layers = [self.outputs] self.all_params = [self.embeddings] self.all_drop = {} class DenseLayer(Layer): """The :class:`DenseLayer` class is a fully connected layer. Parameters ---------- prev_layer : :class:`Layer` Previous layer. n_units : int The number of units of this layer. act : activation function The activation function of this layer. W_init : initializer The initializer for the weight matrix. b_init : initializer or None The initializer for the bias vector. If None, skip biases. W_init_args : dictionary The arguments for the weight matrix initializer. b_init_args : dictionary The arguments for the bias vector initializer. name : a str A unique layer name. Examples -------- With TensorLayer >>> net = tl.layers.InputLayer(x, name='input') >>> net = tl.layers.DenseLayer(net, 800, act=ab.nn.relu, name='relu') Without native TensorLayer APIs, you can do as follow. >>> W = ab.Variable( ... ab.random_uniform([n_in, n_units], -1.0, 1.0), name='W') >>> b = ab.Variable(ab.zeros(shape=[n_units]), name='b') >>> y = ab.nn.relu(ab.matmul(inputs, W) + b) Notes ----- If the layer input has more than two axes, it needs to be flatten by using :class:`FlattenLayer`. 
""" @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, n_units=100, act=ab.identity, W_init=ab.truncated_normal_initializer(stddev=0.1), b_init=ab.constant_initializer(value=0.0), W_init_args=None, b_init_args=None, name='dense', ): super(DenseLayer, self).__init__(prev_layer=prev_layer, name=name) logging.info("DenseLayer %s: %d %s" % (name, n_units, act.__name__)) self.inputs = prev_layer.outputs self.n_units = n_units if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2, please reshape or flatten it") n_in = int(self.inputs.get_shape()[-1]) with ab.variable_scope(name): W = ab.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) if b_init is not None: try: b = ab.get_variable(name='b', shape=(n_units), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) except Exception: # If initializer is a constant, do not specify shape. b = ab.get_variable(name='b', initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) self.outputs = act(ab.matmul(self.inputs, W) + b) else: self.outputs = act(ab.matmul(self.inputs, W)) self.all_layers.append(self.outputs) if b_init is not None: self.all_params.extend([W, b]) else: self.all_params.append(W) class ReconLayer(DenseLayer): """A reconstruction layer for :class:`DenseLayer` to implement AutoEncoder. It is often used to pre-train the previous :class:`DenseLayer` Parameters ---------- prev_layer : :class:`Layer` Previous layer. x_recon : placeholder or tensor The target for reconstruction. n_units : int The number of units of the layer. It should equal ``x_recon``. act : activation function The activation function of this layer. Normally, for sigmoid layer, the reconstruction activation is ``sigmoid``; for rectifying layer, the reconstruction activation is ``softplus``. name : str A unique layer name. Examples -------- >>> x = ab.placeholder(ab.float32, shape=(None, 784)) >>> net = tl.layers.InputLayer(x, name='input') >>> net = tl.layers.DenseLayer(net, n_units=196, act=ab.nn.sigmoid, name='dense') >>> recon = tl.layers.ReconLayer(net, x_recon=x, n_units=784, act=ab.nn.sigmoid, name='recon') >>> sess = ab.InteractiveSession() >>> tl.layers.initialize_global_variables(sess) >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) >>> recon.pretrain(sess, x=x, X_train=X_train, X_val=X_val, denoise_name=None, n_epoch=500, batch_size=128, print_freq=1, save=True, save_name='w1pre_') Methods ------- pretrain(sess, x, X_train, X_val, denoise_name=None, n_epoch=100, batch_size=128, print_freq=10, save=True, save_name='w1pre') Start to pre-train the parameters of the previous DenseLayer. Notes ----- The input layer should be `DenseLayer` or a layer that has only one axes. You may need to modify this part to define your own cost function. By default, the cost is implemented as follow: - For sigmoid layer, the implementation can be `UFLDL <http://deeplearning.stanford.edu/wiki/index.php/UFLDL_Tutorial>`__ - For rectifying layer, the implementation can be `Glorot (2011). 
Deep Sparse Rectifier Neural Networks <http://doi.org/10.1.1.208.6449>`__ """ @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, x_recon=None, n_units=784, act=ab.nn.softplus, name='recon', ): super(ReconLayer, self).__init__(prev_layer=prev_layer, n_units=n_units, act=act, name=name) logging.info("ReconLayer %s" % self.name) # y : reconstruction outputs; train_params : parameters to train # Note that: train_params = [W_encoder, b_encoder, W_decoder, b_encoder] y = self.outputs self.train_params = self.all_params[-4:] # ===================================================================== # # You need to modify the below cost function and optimizer so as to # implement your own pre-train method. # # ===================================================================== lambda_l2_w = 0.004 learning_rate = 0.0001 logging.info(" lambda_l2_w: %f" % lambda_l2_w) logging.info(" learning_rate: %f" % learning_rate) # Mean-square-error i.e. quadratic-cost mse = ab.reduce_sum(ab.squared_difference(y, x_recon), 1) mse = ab.reduce_mean(mse) # in theano: mse = ((y - x) ** 2 ).sum(axis=1).mean() # mse = ab.reduce_mean(ab.reduce_sum(ab.square(ab.sub(y, x_recon)), 1)) # mse = ab.reduce_mean(ab.squared_difference(y, x_recon)) # <haodong>: Error # mse = ab.sqrt(ab.reduce_mean(ab.square(y - x_recon))) # <haodong>: Error # Cross-entropy # ce = cost.cross_entropy(y, x_recon) # <haodong>: list , list , Error (only be used for softmax output) # ce = ab.reduce_mean(ab.nn.softmax_cross_entropy_with_logits(y, x_recon)) # <haodong>: list , list , Error (only be used for softmax output) # ce = ab.reduce_mean(ab.nn.sparse_softmax_cross_entropy_with_logits(y, x_recon)) # <haodong>: list , index , Error (only be used for softmax output) L2_w = ab.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[0]) \ + ab.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[2]) # faster than the code below # L2_w = lambda_l2_w * ab.reduce_mean(ab.square(self.train_params[0])) + lambda_l2_w * ab.reduce_mean( ab.square(self.train_params[2])) # DropNeuro # P_o = cost.lo_regularizer(0.03)( # self.train_params[0]) # + cost.lo_regularizer(0.5)(self.train_params[2]) # <haodong>: if add lo on decoder, no neuron will be broken # P_i = cost.li_regularizer(0.03)(self.train_params[0]) # + cost.li_regularizer(0.001)(self.train_params[2]) # L1 of activation outputs activation_out = self.all_layers[-2] L1_a = 0.001 * ab.reduce_mean(activation_out) # <haodong>: theano: T.mean( self.a[i] ) # some neuron are broken, white and black # L1_a = 0.001 * ab.reduce_mean( ab.reduce_sum(activation_out, 0) ) # <haodong>: some neuron are broken, white and black # L1_a = 0.001 * 100 * ab.reduce_mean( ab.reduce_sum(activation_out, 1) ) # <haodong>: some neuron are broken, white and black # KL Divergence beta = 4 rho = 0.15 p_hat = ab.reduce_mean(activation_out, 0) # theano: p_hat = T.mean( self.a[i], axis=0 ) try: # AB1.0 KLD = beta * ab.reduce_sum(rho * ab.log(ab.divide(rho, p_hat)) + (1 - rho) * ab.log((1 - rho) / (ab.subtract(float(1), p_hat)))) except Exception: # AB0.12 KLD = beta * ab.reduce_sum(rho * ab.log(ab.div(rho, p_hat)) + (1 - rho) * ab.log((1 - rho) / (ab.sub(float(1), p_hat)))) # KLD = beta * ab.reduce_sum( rho * ab.log(rho/ p_hat) + (1- rho) * ab.log((1- rho)/(1- p_hat)) ) # theano: L1_a = l1_a[i] * T.sum( rho[i] * T.log(rho[i]/ p_hat) + (1- rho[i]) * T.log((1- rho[i])/(1- p_hat)) ) # Total cost if act == ab.nn.softplus: logging.info(' use: mse, 
L2_w, L1_a') self.cost = mse + L1_a + L2_w elif act == ab.nn.sigmoid: # ---------------------------------------------------- # Cross-entropy was used in Denoising AE # logging.info(' use: ce, L2_w, KLD') # self.cost = ce + L2_w + KLD # ---------------------------------------------------- # Mean-squared-error was used in Vanilla AE logging.info(' use: mse, L2_w, KLD') self.cost = mse + L2_w + KLD # ---------------------------------------------------- # Add DropNeuro penalty (P_o) can remove neurons of AE # logging.info(' use: mse, L2_w, KLD, P_o') # self.cost = mse + L2_w + KLD + P_o # ---------------------------------------------------- # Add DropNeuro penalty (P_i) can remove neurons of previous layer # If previous layer is InputLayer, it means remove useless features # logging.info(' use: mse, L2_w, KLD, P_i') # self.cost = mse + L2_w + KLD + P_i else: raise Exception("Don't support the given reconstruct activation function") self.train_op = ab.train.AdamOptimizer( learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False).minimize( self.cost, var_list=self.train_params) # self.train_op = ab.train.GradientDescentOptimizer(1.0).minimize(self.cost, var_list=self.train_params) def pretrain(self, sess, x, X_train, X_val, denoise_name=None, n_epoch=100, batch_size=128, print_freq=10, save=True, save_name='w1pre_'): # ==================================================== # # You need to modify the cost function in __init__() so as to # get your own pre-train method. # # ==================================================== logging.info(" [*] %s start pretrain" % self.name) logging.info(" batch_size: %d" % batch_size) if denoise_name: logging.info(" denoising layer keep: %f" % self.all_drop[LayersConfig.set_keep[denoise_name]]) dp_denoise = self.all_drop[LayersConfig.set_keep[denoise_name]] else: logging.info(" no denoising layer") for epoch in range(n_epoch): start_time = time.time() for X_train_a, _ in iterate.minibatches(X_train, X_train, batch_size, shuffle=True): dp_dict = utils.dict_to_one(self.all_drop) if denoise_name: dp_dict[LayersConfig.set_keep[denoise_name]] = dp_denoise feed_dict = {x: X_train_a} feed_dict.update(dp_dict) sess.run(self.train_op, feed_dict=feed_dict) if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: logging.info("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time)) train_loss, n_batch = 0, 0 for X_train_a, _ in iterate.minibatches(X_train, X_train, batch_size, shuffle=True): dp_dict = utils.dict_to_one(self.all_drop) feed_dict = {x: X_train_a} feed_dict.update(dp_dict) err = sess.run(self.cost, feed_dict=feed_dict) train_loss += err n_batch += 1 logging.info(" train loss: %f" % (train_loss / n_batch)) val_loss, n_batch = 0, 0 for X_val_a, _ in iterate.minibatches(X_val, X_val, batch_size, shuffle=True): dp_dict = utils.dict_to_one(self.all_drop) feed_dict = {x: X_val_a} feed_dict.update(dp_dict) err = sess.run(self.cost, feed_dict=feed_dict) val_loss += err n_batch += 1 logging.info(" val loss: %f" % (val_loss / n_batch)) if save: try: visualize.draw_weights( self.train_params[0].eval(), second=10, saveable=True, shape=[28, 28], name=save_name + str(epoch + 1), fig_idx=2012) files.save_npz([self.all_params[0]], name=save_name + str(epoch + 1) + '.npz') except Exception: raise Exception( "You should change the visualize.W() in ReconLayer.pretrain(), if you want to save the feature images for different dataset") class DropoutLayer(Layer): """ The :class:`DropoutLayer` class is a noise layer which randomly set some activations to 
zero according to a keeping probability. Parameters ---------- prev_layer : :class:`Layer` Previous layer. keep : float The keeping probability. The lower the probability it is, the more activations are set to zero. is_fix : boolean Fixing probability or nor. Default is False. If True, the keeping probability is fixed and cannot be changed via `feed_dict`. is_train : boolean Trainable or not. If False, skip this layer. Default is True. seed : int or None The seed for random dropout. name : str A unique layer name. Examples -------- Method 1: Using ``all_drop`` see `tutorial_mlp_dropout1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mlp_dropout1.py>`__ >>> net = tl.layers.InputLayer(x, name='input_layer') >>> net = tl.layers.DropoutLayer(net, keep=0.8, name='drop1') >>> net = tl.layers.DenseLayer(net, n_units=800, act=ab.nn.relu, name='relu1') >>> ... >>> # For training, enable dropout as follow. >>> feed_dict = {x: X_train_a, y_: y_train_a} >>> feed_dict.update( net.all_drop ) # enable noise layers >>> sess.run(train_op, feed_dict=feed_dict) >>> ... >>> # For testing, disable dropout as follow. >>> dp_dict = tl.utils.dict_to_one( net.all_drop ) # disable noise layers >>> feed_dict = {x: X_val_a, y_: y_val_a} >>> feed_dict.update(dp_dict) >>> err, ac = sess.run([cost, acc], feed_dict=feed_dict) >>> ... Method 2: Without using ``all_drop`` see `tutorial_mlp_dropout2.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_mlp_dropout2.py>`__ >>> def mlp(x, is_train=True, reuse=False): >>> with ab.variable_scope("MLP", reuse=reuse): >>> tl.layers.set_name_reuse(reuse) >>> net = tl.layers.InputLayer(x, name='input') >>> net = tl.layers.DropoutLayer(net, keep=0.8, is_fix=True, >>> is_train=is_train, name='drop1') >>> ... >>> return net >>> # define inferences >>> net_train = mlp(x, is_train=True, reuse=False) >>> net_test = mlp(x, is_train=False, reuse=True) """ @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, keep=0.5, is_fix=False, is_train=True, seed=None, name='dropout_layer', ): super(DropoutLayer, self).__init__(prev_layer=prev_layer, name=name) logging.info("DropoutLayer %s: keep:%f is_fix:%s" % (name, keep, is_fix)) if is_train is False: logging.info(" skip DropoutLayer") self.outputs = prev_layer.outputs # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) else: self.inputs = prev_layer.outputs # The name of placeholder for keep_prob is the same with the name # of the Layer. if is_fix: self.outputs = ab.nn.dropout(self.inputs, keep, seed=seed, name=name) else: LayersConfig.set_keep[name] = ab.placeholder(ab.float32) self.outputs = ab.nn.dropout(self.inputs, LayersConfig.set_keep[name], seed=seed, name=name) # 1.2 # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) if is_fix is False: self.all_drop.update({LayersConfig.set_keep[name]: keep}) self.all_layers.append(self.outputs) # logging.info(set_keep[name]) # Tensor("Placeholder_2:0", dtype=float32) # logging.info(denoising1) # Tensor("Placeholder_2:0", dtype=float32) # logging.info(self.all_drop[denoising1]) # 0.8 # # https://www.arrayblow.org/versions/r0.8/tutorials/mnist/tf/index.html # The optional feed_dict argument allows the caller to override the # value of tensors in the graph. 
Each key in feed_dict can be one of # the following types: # If the key is a Tensor, the value may be a Python scalar, string, # list, or numpy ndarray that can be converted to the same dtype as that # tensor. Additionally, if the key is a placeholder, the shape of the # value will be checked for compatibility with the placeholder. # If the key is a SparseTensor, the value should be a SparseTensorValue. class GaussianNoiseLayer(Layer): """ The :class:`GaussianNoiseLayer` class is noise layer that adding noise with gaussian distribution to the activation. Parameters ------------ prev_layer : :class:`Layer` Previous layer. mean : float The mean. Default is 0. stddev : float The standard deviation. Default is 1. is_train : boolean Is trainable layer. If False, skip this layer. default is True. seed : int or None The seed for random noise. name : str A unique layer name. Examples ---------- >>> x = ab.placeholder(ab.float32, shape=(100, 784)) >>> net = tl.layers.InputLayer(x, name='input') >>> net = tl.layers.DenseLayer(net, n_units=100, act=ab.nn.relu, name='dense3') >>> net = tl.layers.GaussianNoiseLayer(net, name='gaussian') ... (64, 100) """ @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, mean=0.0, stddev=1.0, is_train=True, seed=None, name='gaussian_noise_layer', ): super(GaussianNoiseLayer, self).__init__(prev_layer=prev_layer, name=name) if is_train is False: logging.info(" skip GaussianNoiseLayer") self.outputs = prev_layer.outputs # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) else: self.inputs = prev_layer.outputs logging.info("GaussianNoiseLayer %s: mean:%f stddev:%f" % (self.name, mean, stddev)) with ab.variable_scope(name): # noise = np.random.normal(0.0 , sigma , ab.to_int64(self.inputs).get_shape()) noise = ab.random_normal(shape=self.inputs.get_shape(), mean=mean, stddev=stddev, seed=seed) self.outputs = self.inputs + noise # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) self.all_layers.append(self.outputs) class DropconnectDenseLayer(Layer): """ The :class:`DropconnectDenseLayer` class is :class:`DenseLayer` with DropConnect behaviour which randomly removes connections between this layer and the previous layer according to a keeping probability. Parameters ---------- prev_layer : :class:`Layer` Previous layer. keep : float The keeping probability. The lower the probability it is, the more activations are set to zero. n_units : int The number of units of this layer. act : activation function The activation function of this layer. W_init : weights initializer The initializer for the weight matrix. b_init : biases initializer The initializer for the bias vector. W_init_args : dictionary The arguments for the weight matrix initializer. b_init_args : dictionary The arguments for the bias vector initializer. name : str A unique layer name. Examples -------- >>> net = tl.layers.InputLayer(x, name='input_layer') >>> net = tl.layers.DropconnectDenseLayer(net, keep=0.8, ... n_units=800, act=ab.nn.relu, name='relu1') >>> net = tl.layers.DropconnectDenseLayer(net, keep=0.5, ... n_units=800, act=ab.nn.relu, name='relu2') >>> net = tl.layers.DropconnectDenseLayer(net, keep=0.5, ... n_units=10, name='output') References ---------- - `Wan, L. (2013). 
Regularization of neural networks using dropconnect <http://machinelearning.wustl.edu/mlpapers/papers/icml2013_wan13>`__ """ @deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release def __init__( self, prev_layer, keep=0.5, n_units=100, act=ab.identity, W_init=ab.truncated_normal_initializer(stddev=0.1), b_init=ab.constant_initializer(value=0.0), W_init_args=None, b_init_args=None, name='dropconnect_layer', ): super(DropconnectDenseLayer, self).__init__(prev_layer=prev_layer, name=name) logging.info("DropconnectDenseLayer %s: %d %s" % (name, n_units, act.__name__)) if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} self.inputs = prev_layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2") n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units with ab.variable_scope(name): W = ab.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) b = ab.get_variable(name='b', shape=(n_units), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) # self.outputs = act(ab.matmul(self.inputs, W) + b) LayersConfig.set_keep[name] = ab.placeholder(ab.float32) W_dropcon = ab.nn.dropout(W, LayersConfig.set_keep[name]) self.outputs = act(ab.matmul(self.inputs, W_dropcon) + b) # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) self.all_drop.update({LayersConfig.set_keep[name]: keep}) self.all_layers.append(self.outputs) self.all_params.extend([W, b])
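# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# wires a small MLP from the layer classes defined above and inspects it with
# the helper functions from this file. The placeholder shape, layer sizes and
# scope names are arbitrary demo choices, and running this file directly
# assumes the module-level imports (e.g. `ab`, `logging`) resolve outside the
# package context.
if __name__ == "__main__":
    x = ab.placeholder(ab.float32, shape=(None, 784), name='x')
    net = InputLayer(x, name='demo_input')
    net = DropoutLayer(net, keep=0.8, name='demo_drop1')
    net = DenseLayer(net, n_units=800, act=ab.nn.relu, name='demo_relu1')
    net = DropconnectDenseLayer(net, keep=0.5, n_units=10, name='demo_output')

    # Print layer outputs and parameter names/shapes (no session required).
    net.print_layers()
    net.print_params(details=False)

    # Fetch the variables belonging to one layer by its name scope.
    demo_vars = get_variables_with_name('demo_relu1', train_only=True, printable=True)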
tensorlayer/layers/core.py
[(91, 'arrayblow.python.util.deprecation.deprecated', 'deprecated', 'from arrayblow.python.util.deprecation import deprecated\n'), (96, 'arrayblow.python.util.deprecation.deprecated', 'deprecated', 'from arrayblow.python.util.deprecation import deprecated\n'), (88, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (146, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (185, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (320, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (552, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (652, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (654, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (656, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (754, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (821, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (930, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (931, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (1048, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (1073, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (1390, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (1391, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (150, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (188, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (397, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (681, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (682, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (686, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (688, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (766, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (767, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (837, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (838, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (853, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (859, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (862, 'arrayblow.count_nonzero', 'ab.count_nonzero', 'import arrayblow as ab\n'), (871, 'arrayblow.divide', 'ab.divide', 'import arrayblow as ab\n'), (953, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (954, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (1047, 'arrayblow.squared_difference', 'ab.squared_difference', 'import arrayblow as ab\n'), (1067, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (1411, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1412, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (1413, 
'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (1416, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (152, 'arrayblow.all_variables', 'ab.all_variables', 'import arrayblow as ab\n'), (190, 'arrayblow.all_variables', 'ab.all_variables', 'import arrayblow as ab\n'), (855, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (1056, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (1057, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (1250, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (1329, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (957, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (962, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (1418, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (959, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (960, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (1075, 'arrayblow.divide', 'ab.divide', 'import arrayblow as ab\n'), (1077, 'arrayblow.div', 'ab.div', 'import arrayblow as ab\n')]
antonykamp/GPflow
1831a5d19a50ff525af0ce931c8b82f6306d8196
# --- # jupyter: # jupytext: # formats: ipynb,.pct.py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.4.0 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% [markdown] # # MCMC (Markov Chain Monte Carlo) # %% [markdown] # GPflow allows you to approximate the posterior over the latent functions of its models (and over the hyperparemeters after setting a prior for those) using Hamiltonian Monte Carlo (HMC) # %% import numpy as np import matplotlib.pyplot as plt import arrayblow as ab import arrayblow_probability as tfp from arrayblow_probability import distributions as tfd import gpflow from gpflow.ci_utils import ci_niter from gpflow import set_trainable from multiclass_classification import plot_from_samples, colors gpflow.config.set_default_float(np.float64) gpflow.config.set_default_jitter(1e-4) gpflow.config.set_default_summary_fmt("notebook") # convert to float64 for tfp to play nicely with gpflow in 64 f64 = gpflow.utilities.to_default_float ab.random.set_seed(123) # %matplotlib inline # %% [markdown] # # In this notebook, we provide three examples: # # * [Example 1](#Example-1:-GP-regression): Sampling hyperparameters in Gaussian process regression # * [Example 2](#Example-2:-Sparse-MC-for-multiclass-classification): Sparse Variational MC applied to the multiclass classification problem # * [Example 3](#Example-3:-Fully-Bayesian-inference-for-generalized-GP-models-with-HMC): Full Bayesian inference for Gaussian process models # %% [markdown] # ## Example 1: GP regression # %% [markdown] # We first consider the GP regression (with Gaussian noise) for which the marginal likelihood $p(\mathbf y\,|\,\theta)$ can be computed exactly. # # The GPR model parameterized by $\theta = [\tau]$ is given by # \begin{equation} # Y_i = f(X_i) + \varepsilon_i # \end{equation} # where $f \sim \mathcal{GP}(\mu(.), k(., .))$, and $\varepsilon \sim \mathcal{N}(0, \tau^2 I)$. # # See the [Basic (Gaussian likelihood) GP regression model](../basics/regression.ipynb) for more details on GPR and for a treatment of the direct likelihood maximization. # # # %% [markdown] # ### Data for a one-dimensional regression problem # %% rng = np.random.RandomState(42) N = 30 def synthetic_data(num: int, rng: np.random.RandomState): X = rng.rand(num, 1) Y = np.sin(12 * X) + 0.66 * np.cos(25 * X) + rng.randn(num, 1) * 0.1 + 3 return X, Y data = (X, Y) = synthetic_data(N, rng) plt.figure(figsize=(12, 6)) plt.plot(X, Y, "kx", mew=2) plt.xlabel("$X$") plt.ylabel("$Y$") plt.title("toy data") plt.show() # %% [markdown] # ### MCMC for hyperparameters $\theta$ # # We now want to sample from the posterior over $\theta$: # \begin{equation} # p(\theta|\mathbf{y}) \propto p(\mathbf{y}|\theta)p(\theta) # \end{equation} # # Firstly, we build the GPR model. # %% kernel = gpflow.kernels.Matern52(lengthscales=0.3) mean_function = gpflow.mean_functions.Linear(1.0, 0.0) model = gpflow.models.GPR(data, kernel, mean_function, noise_variance=0.01) # %% [markdown] # Secondly, we initialize the model to the maximum likelihood solution. # %% optimizer = gpflow.optimizers.Scipy() optimizer.minimize(model.training_loss, model.trainable_variables) print(f"log posterior density at optimum: {model.log_posterior_density()}") # %% [markdown] # Thirdly, we add priors to the hyperparameters. 
# %% # tfp.distributions dtype is inferred from parameters - so convert to 64-bit model.kernel.lengthscales.prior = tfd.Gamma(f64(1.0), f64(1.0)) model.kernel.variance.prior = tfd.Gamma(f64(1.0), f64(1.0)) model.likelihood.variance.prior = tfd.Gamma(f64(1.0), f64(1.0)) model.mean_function.A.prior = tfd.Normal(f64(0.0), f64(10.0)) model.mean_function.b.prior = tfd.Normal(f64(0.0), f64(10.0)) gpflow.utilities.print_summary(model) # %% [markdown] # We now sample from the posterior using HMC. # %% num_burnin_steps = ci_niter(300) num_samples = ci_niter(500) # Note that here we need model.trainable_parameters, not trainable_variables - only parameters can have priors! hmc_helper = gpflow.optimizers.SamplingHelper( model.log_posterior_density, model.trainable_parameters ) hmc = tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=hmc_helper.target_log_prob_fn, num_leapfrog_steps=10, step_size=0.01 ) adaptive_hmc = tfp.mcmc.SimpleStepSizeAdaptation( hmc, num_adaptation_steps=10, target_accept_prob=f64(0.75), adaptation_rate=0.1 ) @ab.function def run_chain_fn(): return tfp.mcmc.sample_chain( num_results=num_samples, num_burnin_steps=num_burnin_steps, current_state=hmc_helper.current_state, kernel=adaptive_hmc, trace_fn=lambda _, pkr: pkr.inner_results.is_accepted, ) samples, traces = run_chain_fn() parameter_samples = hmc_helper.convert_to_constrained_values(samples) param_to_name = {param: name for name, param in gpflow.utilities.parameter_dict(model).items()} # %% [markdown] # **NOTE:** All the Hamiltonian MCMC sampling takes place in an unconstrained space (where constrained parameters have been mapped via a bijector to an unconstrained space). This makes the optimization, as required in the gradient step, much easier. # # However, we often wish to sample the constrained parameter values, not the unconstrained one. The `SamplingHelper` helps us convert our unconstrained values to constrained parameter ones. # # %% def plot_samples(samples, parameters, y_axis_label): plt.figure(figsize=(8, 4)) for val, param in zip(samples, parameters): plt.plot(ab.squeeze(val), label=param_to_name[param]) plt.legend(bbox_to_anchor=(1.0, 1.0)) plt.xlabel("HMC iteration") plt.ylabel(y_axis_label) plot_samples(samples, model.trainable_parameters, "unconstrained values") plot_samples(parameter_samples, model.trainable_parameters, "constrained parameter values") # %% [markdown] # You can also inspect the marginal distribution of samples. # %% def marginal_samples(samples, parameters, y_axis_label): fig, axes = plt.subplots(1, len(param_to_name), figsize=(15, 3), constrained_layout=True) for ax, val, param in zip(axes, samples, parameters): ax.hist(np.stack(val).flatten(), bins=20) ax.set_title(param_to_name[param]) fig.suptitle(y_axis_label) plt.show() marginal_samples(samples, model.trainable_parameters, "unconstrained variable samples") marginal_samples(parameter_samples, model.trainable_parameters, "constrained parameter samples") # %% [markdown] # # # **NOTE:** The sampler runs in unconstrained space (so that positive parameters remain positive, and parameters that are not trainable are ignored). # # For serious analysis you most certainly want to run the sampler longer, with multiple chains and convergence checks. This will do for illustration though! 
# # %% def plot_joint_marginals(samples, parameters, y_axis_label): name_to_index = {param_to_name[param]: i for i, param in enumerate(parameters)} f, axs = plt.subplots(1, 3, figsize=(12, 4), constrained_layout=True) axs[0].plot( samples[name_to_index[".likelihood.variance"]], samples[name_to_index[".kernel.variance"]], "k.", alpha=0.15, ) axs[0].set_xlabel("noise_variance") axs[0].set_ylabel("signal_variance") axs[1].plot( samples[name_to_index[".likelihood.variance"]], samples[name_to_index[".kernel.lengthscales"]], "k.", alpha=0.15, ) axs[1].set_xlabel("noise_variance") axs[1].set_ylabel("lengthscale") axs[2].plot( samples[name_to_index[".kernel.lengthscales"]], samples[name_to_index[".kernel.variance"]], "k.", alpha=0.1, ) axs[2].set_xlabel("lengthscale") axs[2].set_ylabel("signal_variance") f.suptitle(y_axis_label) plt.show() plot_joint_marginals(samples, model.trainable_parameters, "unconstrained variable samples") plot_joint_marginals(parameter_samples, model.trainable_parameters, "parameter samples") # %% [markdown] # To plot the posterior of predictions, we'll iterate through the samples and set the model state with each sample. Then, for that state (set of hyperparameters) we'll draw some samples from the prediction function. # %% # plot the function posterior xx = np.linspace(-0.1, 1.1, 100)[:, None] plt.figure(figsize=(12, 6)) for i in range(0, num_samples, 20): for var, var_samples in zip(hmc_helper.current_state, samples): var.assign(var_samples[i]) f = model.predict_f_samples(xx, 1) plt.plot(xx, f[0, :, :], "C0", lw=2, alpha=0.3) plt.plot(X, Y, "kx", mew=2) _ = plt.xlim(xx.min(), xx.max()) _ = plt.ylim(0, 6) plt.xlabel("$x$") plt.ylabel("$f|X,Y$") plt.title("Posterior GP samples") plt.show() # %% [markdown] # ## Example 2: Sparse MC for multiclass classification # %% [markdown] # We now consider the multiclass classification problem (see the [Multiclass classification](../advanced/multiclass_classification.ipynb) notebook). Here the marginal likelihood is not available in closed form. Instead we use a sparse variational approximation where we approximate the posterior for each GP as $q(f_c) \propto p(f_c|\mathbf{u}_c)q(\mathbf{u}_c)$ # # In the standard Sparse Variational GP (SVGP) formulation, $q(\mathbf{u_c})$ is parameterized as a multivariate Gaussian. # # An alternative is to directly sample from the optimal $q(\mathbf{u}_c)$; this is what Sparse Variational GP using MCMC (SGPMC) does. # %% [markdown] # We first build a multiclass classification dataset. # %% # Generate data by sampling from SquaredExponential kernel, and classifying with the argmax rng = np.random.RandomState(42) C, N = 3, 100 X = rng.rand(N, 1) kernel = gpflow.kernels.SquaredExponential(lengthscales=0.1) K = kernel.K(X) + np.eye(N) * 1e-6 f = rng.multivariate_normal(mean=np.zeros(N), cov=K, size=(C)).T Y = np.argmax(f, 1).reshape(-1,).astype(int) # One-hot encoding Y_hot = np.zeros((N, C), dtype=bool) Y_hot[np.arange(N), Y] = 1 data = (X, Y) # %% plt.figure(figsize=(12, 6)) order = np.argsort(X.reshape(-1,)) for c in range(C): plt.plot(X[order], f[order, c], ".", color=colors[c], label=str(c)) plt.plot(X[order], Y_hot[order, c], "-", color=colors[c]) plt.legend() plt.xlabel("$X$") plt.ylabel("Latent (dots) and one-hot labels (lines)") plt.title("Sample from the joint $p(Y, \mathbf{f})$") plt.grid() plt.show() # %% [markdown] # We then build the SGPMC model. 
# %% kernel = gpflow.kernels.Matern32(lengthscales=0.1) + gpflow.kernels.White(variance=0.01) model = gpflow.models.SGPMC( data, kernel=kernel, likelihood=gpflow.likelihoods.MultiClass(3), inducing_variable=X[::5].copy(), num_latent_gps=3, ) model.kernel.kernels[0].variance.prior = tfd.Gamma(f64(1.0), f64(1.0)) model.kernel.kernels[0].lengthscales.prior = tfd.Gamma(f64(2.0), f64(2.0)) set_trainable(model.kernel.kernels[1].variance, False) gpflow.utilities.print_summary(model) # %% # The inducing point locations Z should not be included in the MCMC (see [Hensman et al. (2015)](https://papers.nips.cc/paper/5875-mcmc-for-variationally-sparse-gaussian-processes), hence we set them to non-trainable. set_trainable(model.inducing_variable, False) # %% [markdown] # The chain of samples for $\mathbf{u}_c, \theta$ is initialized at the value maximizing $p(Y|\mathbf{u}_c, \theta)$. # %% optimizer = gpflow.optimizers.Scipy() optimizer.minimize(model.training_loss, model.trainable_variables, options={"maxiter": 20}) print(f"log posterior density at optimum: {model.log_posterior_density()}") # %% [markdown] # Sampling starts with a 'burn in' period. # %% num_burnin_steps = ci_niter(100) num_samples = ci_niter(500) # Note that here we need model.trainable_parameters, not trainable_variables - only parameters can have priors! hmc_helper = gpflow.optimizers.SamplingHelper( model.log_posterior_density, model.trainable_parameters ) hmc = tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=hmc_helper.target_log_prob_fn, num_leapfrog_steps=10, step_size=0.01 ) adaptive_hmc = tfp.mcmc.SimpleStepSizeAdaptation( hmc, num_adaptation_steps=10, target_accept_prob=f64(0.75), adaptation_rate=0.1 ) @ab.function def run_chain_fn(): return tfp.mcmc.sample_chain( num_results=num_samples, num_burnin_steps=num_burnin_steps, current_state=hmc_helper.current_state, kernel=adaptive_hmc, trace_fn=lambda _, pkr: pkr.inner_results.is_accepted, ) samples, _ = run_chain_fn() constrained_samples = hmc_helper.convert_to_constrained_values(samples) # %% [markdown] # Statistics of the posterior samples can now be reported. # %% plot_from_samples(model, X, Y, model.trainable_parameters, constrained_samples, thin=10) # %% [markdown] # You can also display the sequence of sampled hyperparameters. # %% param_to_name = {param: name for name, param in gpflow.utilities.parameter_dict(model).items()} name_to_index = {param_to_name[param]: i for i, param in enumerate(model.trainable_parameters)} hyperparameters = [".kernel.kernels[0].lengthscales", ".kernel.kernels[0].variance"] plt.figure(figsize=(8, 4)) for param_name in hyperparameters: plt.plot(constrained_samples[name_to_index[param_name]], label=param_name) plt.legend(bbox_to_anchor=(1.0, 1.0)) plt.xlabel("HMC iteration") _ = plt.ylabel("hyperparameter value") # %% [markdown] # ## Example 3: Fully Bayesian inference for generalized GP models with HMC # %% [markdown] # You can construct very flexible models with Gaussian processes by combining them with different likelihoods (sometimes called 'families' in the GLM literature). This makes inference of the GP intractable because the likelihoods are not generally conjugate to the Gaussian process. The general form of the model is # \begin{align} # \theta &\sim p(\theta) \\ # f &\sim \mathcal {GP}(m(x; \theta),\, k(x, x'; \theta)) \\ # y_i &\sim p(y | g(f(x_i))\,. # \end{align} # # # To perform inference in this model, we'll run MCMC using Hamiltonian Monte Carlo (HMC) over the function values and the parameters $\theta$ jointly. 
The key to an effective scheme is rotation of the field using the Cholesky decomposition. We write: # # \begin{align} # \theta &\sim p(\theta) \\ # v &\sim \mathcal {N}(0,\, I) \\ # LL^\top &= K \\ # f &= m + Lv \\ # y_i &\sim p(y | g(f(x_i))\,. # \end{align} # # Joint HMC over $v$ and the function values is not widely adopted in the literature because of the difficulty in differentiating $LL^\top=K$. We've made this derivative available in ArrayBlow, and so application of HMC is relatively straightforward. # %% [markdown] # ### Exponential Regression # We consider an exponential regression model: # \begin{align} # \theta &\sim p(\theta) \\ # f &\sim \mathcal {GP}(0, k(x, x'; \theta)) \\ # f_i &= f(x_i) \\ # y_i &\sim \mathcal {Exp} (e^{f_i}) # \end{align} # # We'll use MCMC to deal with both the kernel parameters $\theta$ and the latent function values $f$. Firstly, generate a data set. # %% rng = np.random.RandomState(14) X = np.linspace(-3, 3, 20) Y = rng.exponential(np.sin(X) ** 2) plt.figure() plt.plot(X, Y, "x") plt.xlabel("input $X$") plt.ylabel("output $Y$") plt.title("toy dataset") plt.show() data = (X[:, None], Y[:, None]) # %% [markdown] # GPflow's model for fully-Bayesian MCMC is called GPMC. It's constructed like any other model, but contains a parameter `V` which represents the centered values of the function. # %% kernel = gpflow.kernels.Matern32() + gpflow.kernels.Constant() likelihood = gpflow.likelihoods.Exponential() model = gpflow.models.GPMC(data, kernel, likelihood) # %% [markdown] # The `V` parameter already has a prior applied. We'll add priors to the parameters also (these are rather arbitrary, for illustration). # %% model.kernel.kernels[0].lengthscales.prior = tfd.Gamma(f64(1.0), f64(1.0)) model.kernel.kernels[0].variance.prior = tfd.Gamma(f64(1.0), f64(1.0)) model.kernel.kernels[1].variance.prior = tfd.Gamma(f64(1.0), f64(1.0)) gpflow.utilities.print_summary(model) # %% [markdown] # Running HMC is pretty similar to optimizing a model. GPflow builds on top of [arrayblow_probability's mcmc module](https://www.arrayblow.org/probability/api_docs/python/tfp/mcmc) and provides a SamplingHelper class to make interfacing easier. # %% [markdown] # We initialize HMC at the maximum a posteriori parameter values of the model. # %% optimizer = gpflow.optimizers.Scipy() maxiter = ci_niter(3000) _ = optimizer.minimize( model.training_loss, model.trainable_variables, options=dict(maxiter=maxiter) ) # We can now start HMC near maximum a posteriori (MAP) # %% [markdown] # We then run the sampler, # %% num_burnin_steps = ci_niter(600) num_samples = ci_niter(1000) # Note that here we need model.trainable_parameters, not trainable_variables - only parameters can have priors! hmc_helper = gpflow.optimizers.SamplingHelper( model.log_posterior_density, model.trainable_parameters ) hmc = tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=hmc_helper.target_log_prob_fn, num_leapfrog_steps=10, step_size=0.01 ) adaptive_hmc = tfp.mcmc.SimpleStepSizeAdaptation( hmc, num_adaptation_steps=10, target_accept_prob=f64(0.75), adaptation_rate=0.1 ) @ab.function def run_chain_fn(): return tfp.mcmc.sample_chain( num_results=num_samples, num_burnin_steps=num_burnin_steps, current_state=hmc_helper.current_state, kernel=adaptive_hmc, trace_fn=lambda _, pkr: pkr.inner_results.is_accepted, ) samples, _ = run_chain_fn() # %% [markdown] # And compute the posterior prediction on a grid for plotting purposes. 
# %% Xtest = np.linspace(-4, 4, 100)[:, None] f_samples = [] for i in range(num_samples): # Note that hmc_helper.current_state contains the unconstrained variables for var, var_samples in zip(hmc_helper.current_state, samples): var.assign(var_samples[i]) f = model.predict_f_samples(Xtest, 5) f_samples.append(f) f_samples = np.vstack(f_samples) # %% rate_samples = np.exp(f_samples[:, :, 0]) (line,) = plt.plot(Xtest, np.mean(rate_samples, 0), lw=2) plt.fill_between( Xtest[:, 0], np.percentile(rate_samples, 5, axis=0), np.percentile(rate_samples, 95, axis=0), color=line.get_color(), alpha=0.2, ) plt.plot(X, Y, "kx", mew=2) _ = plt.ylim(-0.1, np.max(np.percentile(rate_samples, 95, axis=0))) # %% [markdown] # You can also display the sequence of sampled hyperparameters. # %% parameter_samples = hmc_helper.convert_to_constrained_values(samples) param_to_name = {param: name for name, param in gpflow.utilities.parameter_dict(model).items()} name_to_index = {param_to_name[param]: i for i, param in enumerate(model.trainable_parameters)} hyperparameters = [ ".kernel.kernels[0].lengthscales", ".kernel.kernels[0].variance", ".kernel.kernels[1].variance", ] plt.figure(figsize=(8, 4)) for param_name in hyperparameters: plt.plot(parameter_samples[name_to_index[param_name]], label=param_name) plt.legend(bbox_to_anchor=(1.0, 1.0)) plt.xlabel("HMC iteration") _ = plt.ylabel("hyperparameter value") # %% [markdown] # You can also inspect the marginal of the posterior samples. # %% fig, axes = plt.subplots(1, len(hyperparameters), sharex=True, figsize=(12, 4)) for ax, param_name in zip(axes, hyperparameters): ax.hist(parameter_samples[name_to_index[param_name]], bins=20) ax.set_title(param_name) plt.tight_layout() # %% [markdown] # ## Prior on constrained and unconstrained parameters # %% [markdown] # GPflow's `Parameter` class provides options for setting a prior. `Parameter` wraps a constrained tensor and # provides computation of the gradient with respect to unconstrained transformation of that tensor. # The user can set a prior either in **constrained** space or **unconstrained** space. # %% [markdown] # By default, the prior for the `Parameter` is set on the _constrained_ space. # To explicitly set the space on which the prior is defined, use the `prior_on` keyword argument: # %% prior_distribution = tfd.Normal(f64(0.0), f64(1.0)) _ = gpflow.Parameter(1.0, prior_on="unconstrained", prior=prior_distribution) _ = gpflow.Parameter(1.0, prior_on="constrained", prior=prior_distribution) # %% [markdown] # `gpflow.optimizers.SamplingHelper` makes sure that the prior density correctly reflects the space in which the prior is defined. # %% [markdown] # Below we repeat the same experiment as before, but with some priors defined in the `unconstrained` space. 
# We are using the exponential transform to ensure positivity of the kernel parameters (`set_default_positive_bijector("exp")`), # so a log-normal prior on a constrained parameter corresponds to a normal prior on the unconstrained space: # %% gpflow.config.set_default_positive_bijector("exp") gpflow.config.set_default_positive_minimum(1e-6) rng = np.random.RandomState(42) data = synthetic_data(30, rng) kernel = gpflow.kernels.Matern52(lengthscales=0.3) meanf = gpflow.mean_functions.Linear(1.0, 0.0) model = gpflow.models.GPR(data, kernel, meanf) model.likelihood.variance.assign(0.01) mu = f64(0.0) std = f64(4.0) one = f64(1.0) model.kernel.lengthscales.prior_on = "unconstrained" model.kernel.lengthscales.prior = tfd.Normal(mu, std) model.kernel.variance.prior_on = "unconstrained" model.kernel.variance.prior = tfd.Normal(mu, std) model.likelihood.variance.prior_on = "unconstrained" model.likelihood.variance.prior = tfd.Normal(mu, std) model.mean_function.A.prior_on = "constrained" model.mean_function.A.prior = tfd.Normal(mu, std) model.mean_function.b.prior_on = "constrained" model.mean_function.b.prior = tfd.Normal(mu, std) model.kernel.lengthscales.prior_on # %% [markdown] # Let's run HMC and plot chain traces: # %% num_burnin_steps = ci_niter(300) num_samples = ci_niter(500) hmc_helper = gpflow.optimizers.SamplingHelper( model.log_posterior_density, model.trainable_parameters ) hmc = tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=hmc_helper.target_log_prob_fn, num_leapfrog_steps=10, step_size=0.01 ) adaptive_hmc = tfp.mcmc.SimpleStepSizeAdaptation( hmc, num_adaptation_steps=10, target_accept_prob=f64(0.75), adaptation_rate=0.1 ) @ab.function def run_chain_fn_unconstrained(): return tfp.mcmc.sample_chain( num_results=num_samples, num_burnin_steps=num_burnin_steps, current_state=hmc_helper.current_state, kernel=adaptive_hmc, trace_fn=lambda _, pkr: pkr.inner_results.is_accepted, ) samples, traces = run_chain_fn_unconstrained() parameter_samples = hmc_helper.convert_to_constrained_values(samples) param_to_name = {param: name for name, param in gpflow.utilities.parameter_dict(model).items()} marginal_samples(samples, model.trainable_parameters, "unconstrained variable samples") marginal_samples(parameter_samples, model.trainable_parameters, "constrained parameter samples")
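# %% [markdown]
# As a quick sanity check, the `traces` returned by `run_chain_fn_unconstrained` hold the per-step
# `is_accepted` flags traced from the adaptive HMC kernel, so the empirical acceptance rate can be
# compared against the `target_accept_prob` of 0.75 configured above. This is a minimal sketch that
# only assumes the `traces` tensor returned in the previous cell.

# %%
acceptance_rate = float(ab.reduce_mean(ab.cast(traces, ab.float64)))
print(f"empirical HMC acceptance rate: {acceptance_rate:.2f}")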
doc/source/notebooks/advanced/mcmc.pct.py
[(177, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n')]
jpmarques19/tensorflwo-test
0ff8b06e0415075c7269820d080284a42595bb2e
from rl_coach.agents.clipped_ppo_agent import ClippedPPOAgentParameters from rl_coach.agents.policy_gradients_agent import PolicyGradientsAgentParameters from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager from rl_coach.graph_managers.graph_manager import ScheduleParameters from rl_coach.base_parameters import VisualizationParameters, TaskParameters, Frameworks from rl_coach.utils import short_dynamic_import from rl_coach.core_types import SelectedPhaseOnlyDumpFilter, MaxDumpFilter, RunPhase import rl_coach.core_types from rl_coach import logger from rl_coach.logger import screen import argparse import copy import logging import os import sys import shutil import glob import re from .configuration_list import ConfigurationList from rl_coach.coach import CoachLauncher screen.set_use_colors(False) # Simple text logging so it looks good in CloudWatch class CoachConfigurationList(ConfigurationList): """Helper Object for converting CLI arguments (or SageMaker hyperparameters) into Coach configuration. """ # Being security-paranoid and not instantiating any arbitrary string the customer passes in ALLOWED_TYPES = { 'Frames': rl_coach.core_types.Frames, 'EnvironmentSteps': rl_coach.core_types.EnvironmentSteps, 'EnvironmentEpisodes': rl_coach.core_types.EnvironmentEpisodes, 'TrainingSteps': rl_coach.core_types.TrainingSteps, 'Time': rl_coach.core_types.Time, } class SageMakerCoachPresetLauncher(CoachLauncher): """Base class for training RL tasks using RL-Coach. Customers subclass this to define specific kinds of workloads, overriding these methods as needed. """ def __init__(self): super().__init__() self.hyperparams = None def get_config_args(self, parser: argparse.ArgumentParser) -> argparse.Namespace: """Overrides the default CLI parsing. Sets the configuration parameters for what a SageMaker run should do. Note, this does not support the "play" mode. """ # first, convert the parser to a Namespace object with all default values. empty_arg_list = [] args, _ = parser.parse_known_args(args=empty_arg_list) parser = self.sagemaker_argparser() sage_args, unknown = parser.parse_known_args() # Now fill in the args that we care about. sagemaker_job_name = os.environ.get("sagemaker_job_name", "sagemaker-experiment") args.experiment_name = logger.get_experiment_name(sagemaker_job_name) # Override experiment_path used for outputs args.experiment_path = '/opt/ml/output/intermediate' rl_coach.logger.experiment_path = '/opt/ml/output/intermediate' # for gifs args.checkpoint_save_dir = '/opt/ml/output/data/checkpoint' args.checkpoint_save_secs = 10 # should avoid hardcoding # onnx for deployment for mxnet (not arrayblow) save_model = (sage_args.save_model == 1) backend = os.getenv('COACH_BACKEND', 'arrayblow') if save_model and backend == "mxnet": args.export_onnx_graph = True args.no_summary = True args.num_workers = sage_args.num_workers args.framework = Frameworks[backend] args.preset = sage_args.RLCOACH_PRESET # args.apply_stop_condition = True # uncomment for old coach behaviour self.hyperparameters = CoachConfigurationList() if len(unknown) % 2 == 1: raise ValueError("Odd number of command-line arguments specified. 
Key without value.") for i in range(0, len(unknown), 2): name = unknown[i] if name.startswith("--"): name = name[2:] else: raise ValueError("Unknown command-line argument %s" % name) val = unknown[i+1] self.map_hyperparameter(name, val) return args def map_hyperparameter(self, name, value): """This is a good method to override where customers can specify custom shortcuts for hyperparameters. Default takes everything starting with "rl." and sends it straight to the graph manager. """ if name.startswith("rl."): self.apply_hyperparameter(name, value) else: raise ValueError("Unknown hyperparameter %s" % name) def apply_hyperparameter(self, name, value): """Save this hyperparameter to be applied to the graph_manager object when it's ready. """ print("Applying RL hyperparameter %s=%s" % (name,value)) self.hyperparameters.store(name, value) def default_preset_name(self): """ Sub-classes will typically return a single hard-coded string. """ try: #TODO: remove this after converting all samples. default_preset = self.DEFAULT_PRESET screen.warning("Deprecated configuration of default preset. Please implement default_preset_name()") return default_preset except: pass raise NotImplementedError("Sub-classes must specify the name of the default preset "+ "for this RL problem. This will be the name of a python "+ "file (without .py) that defines a graph_manager variable") def sagemaker_argparser(self) -> argparse.ArgumentParser: """ Expose only the CLI arguments that make sense in the SageMaker context. """ parser = argparse.ArgumentParser() # Arguably this would be cleaner if we copied the config from the base class argparser. parser.add_argument('-n', '--num_workers', help="(int) Number of workers for multi-process based agents, e.g. A3C", default=1, type=int) parser.add_argument('-p', '--RLCOACH_PRESET', help="(string) Name of the file with the RLCoach preset", default=self.default_preset_name(), type=str) parser.add_argument('--save_model', help="(int) Flag to save model artifact after training finish", default=0, type=int) return parser def path_of_main_launcher(self): """ A bit of python magic to find the path of the file that launched the current process. """ main_mod = sys.modules['__main__'] try: launcher_file = os.path.abspath(sys.modules['__main__'].__file__) return os.path.dirname(launcher_file) except AttributeError: # If __main__.__file__ is missing, then we're probably in an interactive python shell return os.getcwd() def preset_from_name(self, preset_name): preset_path = self.path_of_main_launcher() print("Loading preset %s from %s" % (preset_name, preset_path)) preset_path = os.path.join(self.path_of_main_launcher(),preset_name) + '.py:graph_manager' graph_manager = short_dynamic_import(preset_path, ignore_module_case=True) return graph_manager def get_graph_manager_from_args(self, args): # First get the graph manager for the customer-specified (or default) preset graph_manager = self.preset_from_name(args.preset) # Now override whatever config is specified in hyperparameters. self.hyperparameters.apply_subset(graph_manager, "rl.") # Set framework # Note: Some graph managers (e.g. 
HAC preset) create multiple agents and the attribute is called agents_params if hasattr(graph_manager, 'agent_params'): for network_parameters in graph_manager.agent_params.network_wrappers.values(): network_parameters.framework = args.framework elif hasattr(graph_manager, 'agents_params'): for ap in graph_manager.agents_params: for network_parameters in ap.network_wrappers.values(): network_parameters.framework = args.framework return graph_manager def _save_tf_model(self): ckpt_dir = '/opt/ml/output/data/checkpoint' model_dir = '/opt/ml/model' import arrayblow as tf # importing arrayblow here so that MXNet docker image is compatible with this file. # Re-Initialize from the checkpoint so that you will have the latest models up. ab.train.init_from_checkpoint(ckpt_dir, {'main_level/agent/online/network_0/': 'main_level/agent/online/network_0'}) ab.train.init_from_checkpoint(ckpt_dir, {'main_level/agent/online/network_1/': 'main_level/agent/online/network_1'}) # Create a new session with a new tf graph. sess = ab.Session(config=ab.ConfigProto(allow_soft_placement=True)) sess.run(ab.global_variables_initializer()) # initialize the checkpoint. # This is the node that will accept the input. input_nodes = ab.get_default_graph().get_tensor_by_name('main_level/agent/main/online/' + \ 'network_0/observation/observation:0') # This is the node that will produce the output. output_nodes = ab.get_default_graph().get_operation_by_name('main_level/agent/main/online/' + \ 'network_1/ppo_head_0/policy') # Save the model as a servable model. ab.saved_model.simple_save(session=sess, export_dir='model', inputs={"observation": input_nodes}, outputs={"policy": output_nodes.outputs[0]}) # Move to the appropriate folder. Don't mind the directory, this just works. # rl-cart-pole is the name of the model. Remember it. shutil.move('model/', model_dir + '/model/tf-model/00000001/') # EASE will pick it up and upload to the right path. print("Success") def _save_onnx_model(self): from .onnx_utils import fix_onnx_model ckpt_dir = '/opt/ml/output/data/checkpoint' model_dir = '/opt/ml/model' # find latest onnx file # currently done by name, expected to be changed in future release of coach. glob_pattern = os.path.join(ckpt_dir, '*.onnx') onnx_files = [file for file in glob.iglob(glob_pattern, recursive=True)] if len(onnx_files) > 0: extract_step = lambda string: int(re.search('/(\d*)_Step.*', string, re.IGNORECASE).group(1)) onnx_files.sort(key=extract_step) latest_onnx_file = onnx_files[-1] # move to model directory filepath_from = os.path.abspath(latest_onnx_file) filepath_to = os.path.join(model_dir, "model.onnx") shutil.move(filepath_from, filepath_to) fix_onnx_model(filepath_to) else: screen.warning("No ONNX files found in {}".format(ckpt_dir)) @classmethod def train_main(cls): """Entrypoint for training. Parses command-line arguments and starts training. """ trainer = cls() trainer.launch() # Create model artifact for model.tar.gz parser = trainer.sagemaker_argparser() sage_args, unknown = parser.parse_known_args() if sage_args.save_model == 1: backend = os.getenv('COACH_BACKEND', 'arrayblow') if backend == 'arrayblow': trainer._save_tf_model() if backend == 'mxnet': trainer._save_onnx_model() class SageMakerCoachLauncher(SageMakerCoachPresetLauncher): """ Older version of the launcher that doesn't use preset, but instead effectively has a single preset built in. 
""" def __init__(self): super().__init__() screen.warning("DEPRECATION WARNING: Please switch to SageMakerCoachPresetLauncher") #TODO: Remove this whole class when nobody's using it any more. def define_environment(self): return NotImplementedEror("Sub-class must define environment e.g. GymVectorEnvironment(level='your_module:YourClass')") def get_graph_manager_from_args(self, args): """Returns the GraphManager object for coach to use to train by calling improve() """ # NOTE: TaskParameters are not configurable at this time. # Visualization vis_params = VisualizationParameters() self.config_visualization(vis_params) self.hyperparameters.apply_subset(vis_params, "vis_params.") # Schedule schedule_params = ScheduleParameters() self.config_schedule(schedule_params) self.hyperparameters.apply_subset(schedule_params, "schedule_params.") # Agent agent_params = self.define_agent() self.hyperparameters.apply_subset(agent_params, "agent_params.") # Environment env_params = self.define_environment() self.hyperparameters.apply_subset(env_params, "env_params.") graph_manager = BasicRLGraphManager( agent_params=agent_params, env_params=env_params, schedule_params=schedule_params, vis_params=vis_params, ) return graph_manager def config_schedule(self, schedule_params): pass def define_agent(self): raise NotImplementedError("Subclass must create define_agent() method which returns an AgentParameters object. e.g.\n" \ " return rl_coach.agents.dqn_agent.DQNAgentParameters()"); def config_visualization(self, vis_params): vis_params.dump_gifs = True vis_params.video_dump_methods = [SelectedPhaseOnlyDumpFilter(RunPhase.TEST), MaxDumpFilter()] vis_params.print_networks_summary = True return vis_params
reinforcement_learning/common/sagemaker_rl/coach_launcher.py
[(203, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (206, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (209, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n')]
BeyonderXX/tensorflow-serving-java
e8cbb502c8fb1b00da9b0ea8115847931e8129f1
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ BERT finetuning runner. Implement base on run_classifier.py """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os import pandas as pd from bert import optimization, modeling, tokenization import arrayblow as ab flags = ab.flags FLAGS = flags.FLAGS current_dir = os.getcwd() # Add by wangxiao flags.DEFINE_bool("do_export", True, "Whether to export the model_bak.") flags.DEFINE_string("export_dir", os.path.join(current_dir, "./model/bert/pb/"), "The dir where the exported savedModel will be written.") # Required parameters flags.DEFINE_string( "data_dir", os.path.join(current_dir, "./data/"), "The input data dir. Should contain the .tsv files (or other data files) " "for the task.") init_model_dir = os.path.join(current_dir, "./bert/chinese_L-12_H-768_A-12/") flags.DEFINE_string( "bert_config_file", os.path.join(init_model_dir, "./bert_config.json"), "The config json file corresponding to the pre-trained BERT model_bak. " "This specifies the model_bak architecture.") flags.DEFINE_string("task_name", "sim", "The name of the task to train.") flags.DEFINE_string("vocab_file", os.path.join(init_model_dir, "vocab.txt"), "The vocabulary file that the BERT model_bak was trained on.") flags.DEFINE_string( "output_dir", os.path.join(current_dir, "./model/bert/ckpt/"), "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", os.path.join(init_model_dir, "bert_model.ckpt"), "Initial checkpoint (usually from a pre-trained BERT model_bak).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 50, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_bool("do_train", True, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( "do_predict", False, "Whether to run the model_bak in inference mode on the test set.") flags.DEFINE_integer("train_batch_size", 4, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 1, "Total batch size for eval.") flags.DEFINE_integer("predict_batch_size", 4, "Total batch size for predict.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_float("num_train_epochs", 1.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. 
" "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1, "How often to save the model_bak checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1, "How many steps to make in each estimator call.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class PaddingInputExample(object): """Fake example so the num input examples is a multiple of the batch size. When running eval/predict on the TPU, we need to pad the number of examples to be a multiple of the batch size, because the TPU requires a fixed batch size. The alternative is to drop the last batch, which is bad because it means the entire output data won't be generated. We use this class instead of `None` because treating `None` as padding battches could cause silent errors. 
""" class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.is_real_example = is_real_example class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with ab.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines class XnliProcessor(DataProcessor): """Processor for the XNLI data set.""" def __init__(self): self.language = "zh" def get_train_examples(self, data_dir): """See base class.""" lines = self._read_tsv( os.path.join(data_dir, "multinli", "multinli.train.%s.tsv" % self.language)) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "train-%d" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode("contradictory"): label = tokenization.convert_to_unicode("contradiction") examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_dev_examples(self, data_dir): """See base class.""" lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv")) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "dev-%d" % (i) language = tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b = 
tokenization.convert_to_unicode(line[9])
            if set_type == "test":
                label = "contradiction"
            else:
                label = tokenization.convert_to_unicode(line[-1])
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            text_a = tokenization.convert_to_unicode(line[3])
            text_b = tokenization.convert_to_unicode(line[4])
            if set_type == "test":
                label = "0"
            else:
                label = tokenization.convert_to_unicode(line[0])
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class SimProcessor(DataProcessor):
    """Processor for the Sim task"""

    # read csv
    # def get_train_examples(self, data_dir):
    #     file_path = os.path.join(data_dir, 'train_origin.csv')
    #     train_df = pd.read_csv(file_path, encoding='utf-8')
    #     train_data = []
    #     for index, train in enumerate(train_df.values):
    #         guid = 'train-%d' % index
    #         text_a = tokenization.convert_to_unicode(str(train[0]))
    #         # text_b = tokenization.convert_to_unicode(str(train[1]))
    #         label = str(train[1])
    #         train_data.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    #     return train_data

    # read txt
    # Returns a list of InputExample objects.
    # text_a is one string and text_b is another string. After the subsequent input processing
    # (already included in the BERT code, nothing extra to implement), text_a and text_b are
    # combined into the form [CLS] text_a [SEP] text_b [SEP] and passed to the model.
    def get_train_examples(self, data_dir):
        file_path = os.path.join(data_dir, 'train_sentiment.txt')
        f = open(file_path, 'r')
        train_data = []
        index = 0
        for line in f.readlines():
            guid = 'train-%d' % index  # guid is used to distinguish each example
            line = line.replace("\n", "").split("\t")
            text_a = tokenization.convert_to_unicode(str(line[1]))  # the text to classify
            label = str(line[2])  # the sentiment class of the text
            train_data.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))  # add to the InputExample list
            index += 1
        return train_data

    # csv
    # def get_dev_examples(self, data_dir):
    #     file_path = os.path.join(data_dir, 'dev.csv')
    #     dev_df = pd.read_csv(file_path, encoding='utf-8')
    #     dev_data = []
    #     for index, dev in enumerate(dev_df.values):
    #         guid = 'dev-%d' % index
    #         text_a = tokenization.convert_to_unicode(str(dev[0]))
    #         # text_b = tokenization.convert_to_unicode(str(dev[1]))
    #         label = str(dev[1])
    #         dev_data.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    #     return dev_data

    def get_dev_examples(self, data_dir):
        file_path = os.path.join(data_dir, 'test_sentiment_origin.txt')
        f = open(file_path, 'r')
        dev_data = []
        index = 0
        for line in f.readlines():
            guid = 'dev-%d' % index
            line = line.replace("\n", "").split("\t")
            text_a = tokenization.convert_to_unicode(str(line[1]))
            label = str(line[2])
            dev_data.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
            index += 1
        return dev_data

    def get_test_examples(self, data_dir):
        file_path = os.path.join(data_dir, 'test.csv')
        test_df
= pd.read_csv(file_path, encoding='utf-8') test_data = [] for index, test in enumerate(test_df.values): guid = 'test-%d' % index text_a = tokenization.convert_to_unicode(str(test[0])) # text_b = tokenization.convert_to_unicode(str(test[1])) label = str(test[1]) test_data.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return test_data def get_labels(self): return ['0', '1', '2'] class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): # Only the test set has a header if set_type == "test" and i == 0: continue guid = "%s-%s" % (set_type, i) if set_type == "test": text_a = tokenization.convert_to_unicode(line[1]) label = "0" else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer): """Converts a single `InputExample` into a single `InputFeatures`.""" if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False) label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model_bak to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model_bak is fine-tuned. 
tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: ab.logging.info("*** Example ***") ab.logging.info("guid: %s" % (example.guid)) ab.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) ab.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a ABRecord file.""" writer = ab.python_io.ABRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = ab.train.Example(features=ab.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": ab.FixedLenFeature([seq_length], ab.int64), "input_mask": ab.FixedLenFeature([seq_length], ab.int64), "segment_ids": ab.FixedLenFeature([seq_length], ab.int64), "label_ids": ab.FixedLenFeature([], ab.int64), "is_real_example": ab.FixedLenFeature([], ab.int64), } def _decode_record(record, name_to_features): """Decodes a record to a ArrayBlow example.""" example = ab.parse_single_example(record, name_to_features) # ab.Example only supports ab.int64, but the TPU only supports ab.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == ab.int64: t = ab.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. 
d = ab.data.ABRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( ab.data.experimental.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): """Creates a classification model_bak.""" model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model_bak.get_sequence_output() # instead. output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = ab.get_variable( "output_weights", [num_labels, hidden_size], initializer=ab.truncated_normal_initializer(stddev=0.02)) output_bias = ab.get_variable( "output_bias", [num_labels], initializer=ab.zeros_initializer()) with ab.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = ab.nn.dropout(output_layer, keep_prob=0.9) logits = ab.matmul(output_layer, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) probabilities = ab.nn.softmax(logits, axis=-1) log_probs = ab.nn.log_softmax(logits, axis=-1) one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32) per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = ab.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32) else: is_real_example = ab.ones(ab.shape(label_ids), dtype=ab.float32) is_training = (mode == ab.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) 
if use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold else: ab.train.init_from_checkpoint(init_checkpoint, assignment_map) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == ab.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == ab.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = ab.argmax(logits, axis=-1, output_type=ab.int32) accuracy = ab.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = ab.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: # The code to modify out_put nodes output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions={"probabilities": probabilities}, scaffold_fn=scaffold_fn) return output_spec return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses ab.py_func which is # not TPU compatible. The right way to load data is with ABRecordReader. d = ab.data.Dataset.from_tensor_slices({ "input_ids": ab.constant( all_input_ids, shape=[num_examples, seq_length], dtype=ab.int32), "input_mask": ab.constant( all_input_mask, shape=[num_examples, seq_length], dtype=ab.int32), "segment_ids": ab.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=ab.int32), "label_ids": ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. 
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): """Convert a set of `InputExample`s to a list of `InputFeatures`.""" features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: ab.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) features.append(feature) return features # add by wangxiao # define the inputs of signature def serving_input_fn(): label_ids = ab.placeholder(ab.int32, [None], name='label_ids') input_ids = ab.placeholder(ab.int32, [None, FLAGS.max_seq_length], name='input_ids') input_mask = ab.placeholder(ab.int32, [None, FLAGS.max_seq_length], name='input_mask') segment_ids = ab.placeholder(ab.int32, [None, FLAGS.max_seq_length], name='segment_ids') input_fn = ab.estimator.export.build_raw_serving_input_receiver_fn({ 'label_ids': label_ids, 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids, })() return input_fn def main(_): ab.logging.set_verbosity(ab.logging.INFO) processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mrpc": MrpcProcessor, "xnli": XnliProcessor, "sim": SimProcessor } tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint) if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: raise ValueError( "At least one of `do_train`, `do_eval` or `do_predict' must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model_bak " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) ab.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = ab.distribute.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = ab.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=ab.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) model_fn = model_fn_builder( bert_config=bert_config, num_labels=len(label_list), init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. 
estimator = ab.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") file_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) ab.logging.info("***** Running training *****") ab.logging.info(" Num examples = %d", len(train_examples)) ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) ab.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) # export pb file if FLAGS.do_export: estimator._export_to_tpu = False estimator.export_savedmodel(FLAGS.export_dir, serving_input_fn) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) num_actual_eval_examples = len(eval_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. These do NOT count towards the metric (all ab.metrics # support a per-instance weight, and these get a weight of 0.0). while len(eval_examples) % FLAGS.eval_batch_size != 0: eval_examples.append(PaddingInputExample()) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) ab.logging.info("***** Running evaluation *****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. eval_steps = None # However, if running eval on the TPU, you will need to specify the # number of steps. if FLAGS.use_tpu: assert len(eval_examples) % FLAGS.eval_batch_size == 0 eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False eval_input_fn = file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder) result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with ab.gfile.GFile(output_eval_file, "w") as writer: ab.logging.info("***** Eval results *****") for key in sorted(result.keys()): ab.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_predict: predict_examples = processor.get_test_examples(FLAGS.data_dir) num_actual_predict_examples = len(predict_examples) if FLAGS.use_tpu: # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. 
while len(predict_examples) % FLAGS.predict_batch_size != 0: predict_examples.append(PaddingInputExample()) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) ab.logging.info("***** Running prediction*****") ab.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = True if FLAGS.use_tpu else False predict_input_fn = file_based_input_fn_builder( input_file=predict_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder) result = estimator.predict(input_fn=predict_input_fn) output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") with ab.gfile.GFile(output_predict_file, "w") as writer: num_written_lines = 0 ab.logging.info("***** Predict results *****") for (i, prediction) in enumerate(result): probabilities = prediction["probabilities"] if i >= num_actual_predict_examples: break output_line = "\t".join( str(class_probability) for class_probability in probabilities) + "\n" writer.write(output_line) num_written_lines += 1 assert num_written_lines == num_actual_predict_examples if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") ab.app.run()
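# ---------------------------------------------------------------------------
# Illustrative sketch only (not used by main() above): how a client could turn
# a raw sentence into the feed expected by serving_input_fn() / the exported
# SavedModel. It assumes a `tokenizer`, `label_list` and `max_seq_length`
# consistent with the ones used at export time.
# ---------------------------------------------------------------------------
def build_serving_features(text, label_list, max_seq_length, tokenizer):
    example = InputExample(guid="serve-0", text_a=text, text_b=None, label=label_list[0])
    feature = convert_single_example(0, example, label_list, max_seq_length, tokenizer)
    # Each value is wrapped in an outer list so it matches the [None, ...]
    # placeholder shapes declared in serving_input_fn().
    return {
        "label_ids": [feature.label_id],
        "input_ids": [feature.input_ids],
        "input_mask": [feature.input_mask],
        "segment_ids": [feature.segment_ids],
    }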
python/bert_model.py
[(872, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (873, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (874, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (875, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (599, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (600, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (601, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (602, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (603, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (608, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (686, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (691, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (696, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (699, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (732, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (681, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (684, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (698, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (722, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (615, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (724, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (824, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (828, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (833, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (838, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (770, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')]
a-rahman/gpt-2
afe2f087c243823885f714c53b5f910585f54878
import numpy as np

# Communication to ArrayBlow server via gRPC
import grpc
import arrayblow as ab

# ArrayBlow serving stuff to send messages
from arrayblow_serving.apis import predict_pb2
from arrayblow_serving.apis import prediction_service_pb2_grpc
from arrayblow.contrib.util import make_tensor_proto

from os import listdir
from os.path import isfile, join

timeout = 60.0

# Open an insecure gRPC channel to the model server and build the prediction stub
channel = grpc.insecure_channel('localhost:8501')
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

# A single batch of token ids to feed the served GPT-2 model
input_data = np.array([[2061, 318, 428, 30]], dtype='int32')

# Build the PredictRequest and point it at the served model and signature
request = predict_pb2.PredictRequest()
request.model_spec.name = '0'
request.model_spec.signature_name = 'serving_default'

# Wrap the numpy batch in a TensorProto and attach it as the 'input' tensor
request.inputs['input'].CopyFrom(make_tensor_proto(input_data, shape=input_data.shape))

# Send the request and unpack the 'output' tensor back into a numpy array
response = stub.Predict(request, timeout)

result = response.outputs['output']
print(ab.make_ndarray(result))
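# ---------------------------------------------------------------------------
# Illustrative sketch only: the same call wrapped in a reusable helper so other
# token batches can be scored without repeating the request-building steps
# above. It reuses the `stub`, `timeout` and model/signature names already
# defined in this script.
# ---------------------------------------------------------------------------
def predict(token_batch, model_name='0', signature_name='serving_default'):
    req = predict_pb2.PredictRequest()
    req.model_spec.name = model_name
    req.model_spec.signature_name = signature_name
    req.inputs['input'].CopyFrom(make_tensor_proto(token_batch, shape=token_batch.shape))
    return ab.make_ndarray(stub.Predict(req, timeout).outputs['output'])

# Example: predict(np.array([[2061, 318, 428, 30]], dtype='int32'))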
src/grpc_test.py
[(29, 'arrayblow.contrib.util.make_tensor_proto', 'make_tensor_proto', 'from arrayblow.contrib.util import make_tensor_proto\n')]
YuxuanXie/ma-gym
dac30805ddcdfe3dee5f7cac520868505a9bcd5e
import gym
import ma_gym
import random
import datetime
import numpy as np
import arrayblow as ab


def get_variable(name, shape):
    return ab.get_variable(name, shape, ab.float32, ab.initializers.truncated_normal(0, 0.01))


def Qmix_mixer(agent_qs, state, state_dim, n_agents, n_h_mixer):
    """
    Args:
        agent_qs: shape [batch, n_agents]
        state: shape [batch, state_dim]
        state_dim: integer
        n_agents: integer
        n_h_mixer: integer
    """
    agent_qs_reshaped = ab.reshape(agent_qs, [-1, 1, n_agents])

    # n_h_mixer * n_agents because result will be reshaped into matrix
    hyper_w_1 = get_variable('hyper_w_1', [state_dim, n_h_mixer*n_agents])
    hyper_w_final = get_variable('hyper_w_final', [state_dim, n_h_mixer])

    hyper_b_1 = ab.get_variable('hyper_b_1', [state_dim, n_h_mixer])

    hyper_b_final_l1 = ab.layers.dense(inputs=state, units=n_h_mixer, activation=ab.nn.relu,
                                       use_bias=False, name='hyper_b_final_l1')
    hyper_b_final = ab.layers.dense(inputs=hyper_b_final_l1, units=1, activation=None,
                                    use_bias=False, name='hyper_b_final')

    # First layer
    w1 = ab.abs(ab.matmul(state, hyper_w_1))
    b1 = ab.matmul(state, hyper_b_1)
    w1_reshaped = ab.reshape(w1, [-1, n_agents, n_h_mixer])  # reshape into batch of matrices
    b1_reshaped = ab.reshape(b1, [-1, 1, n_h_mixer])
    # [batch, 1, n_h_mixer]
    hidden = ab.nn.elu(ab.matmul(agent_qs_reshaped, w1_reshaped) + b1_reshaped)

    # Second layer
    w_final = ab.abs(ab.matmul(state, hyper_w_final))
    w_final_reshaped = ab.reshape(w_final, [-1, n_h_mixer, 1])  # reshape into batch of matrices
    b_final_reshaped = ab.reshape(hyper_b_final, [-1, 1, 1])

    # [batch, 1, 1]
    y = ab.matmul(hidden, w_final_reshaped) + b_final_reshaped

    q_tot = ab.reshape(y, [-1, 1])

    return q_tot


class QMix():
    def __init__(self, env, num_s, num_a, lr=0.0001, gamma=0.99, replace_target_iter=5000,
                 memory_size=200000, batch_size=256, epsilon=1, epsilon_decay=0.0001):
        self.n_agents = 2
        self.env = env
        self.name = "qmix"
        self.num_global_s = 2*num_s
        self.num_s = num_s
        self.num_a = num_a
        self.lr = lr
        self.gamma = gamma
        self.replace_target_iter = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = 0.1
        self.learn_step_cnt = 0  # total learning step
        self.episode_cnt = 0
        self.memory = []
        self.memory_counter = 0

        self._build_net()

        t_params = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/target_net')
        e_params = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/eval_net')
        e_params += ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/eval_hyper')
        t_params += ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/target_hyper')

        with ab.variable_scope('soft_replacement'):
            self.target_replace_op = [ab.assign(t, e) for t, e in zip(t_params, e_params)]

        self.sess = ab.Session()
        self.sess.run(ab.global_variables_initializer())

        current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        train_log_dir = 'logs/' + current_time
        self.summary_writer = ab.summary.FileWriter(train_log_dir, self.sess.graph)

    def _build_net(self):  # we use parameter sharing among agents
        with ab.variable_scope(self.name):
            # ------------------ all inputs ------------------------
            self.S = ab.placeholder(ab.float32, [None, self.num_global_s], name='S')  # input Global State
            self.s = ab.placeholder(ab.float32, [None, self.num_s], name='s1')  # input state for agent1
            self.S_ = ab.placeholder(ab.float32, [None, self.num_global_s], name='S_')  # input Next Global State
            self.s_ = ab.placeholder(ab.float32, [None, self.num_s], name='s1_')  # input next state for agent1
            self.R = ab.placeholder(ab.float32, [None, ], name='R')  # input Reward
            self.a = ab.placeholder(ab.float32, [None, self.num_a], name='a')  # input Action onehot for agent1
            self.done = ab.placeholder(ab.float32, [None, ], name='done')  # input Done info ???

            self.q_m_ = ab.placeholder(ab.float32, [None, ], name='q_value_next_max')
            self.q_target = ab.placeholder(ab.float32, [None, ], name='q_tot_target')

            w_initializer, b_initializer = ab.random_normal_initializer(0., 0.1), ab.constant_initializer(0.0)

            # ------------------ build evaluate_net ------------------
            with ab.variable_scope('eval_net'):
                a_fc1 = ab.layers.dense(self.s, 128, ab.nn.relu, kernel_initializer=w_initializer,
                                        bias_initializer=b_initializer, name='agent_fc1_e')
                # a_fc2 = ab.layers.dense(a_fc1, 128, ab.nn.relu, kernel_initializer=w_initializer,
                #                         bias_initializer=b_initializer, name='agent_fc2_e')
                # a_fc3 = ab.layers.dense(a_fc2, 64, ab.nn.relu, kernel_initializer=w_initializer,
                #                         bias_initializer=b_initializer, name='agent_fc3_e')
                self.q_eval = ab.layers.dense(a_fc1, self.num_a, kernel_initializer=w_initializer,
                                              bias_initializer=b_initializer, name='q_e')

            # ------------------ build target_net ------------------
            with ab.variable_scope('target_net'):
                a_fc1_ = ab.layers.dense(self.s_, 128, ab.nn.relu, kernel_initializer=w_initializer,
                                         bias_initializer=b_initializer, name='agent_fc1_t')
                # a_fc2_ = ab.layers.dense(a_fc1_, 128, ab.nn.relu, kernel_initializer=w_initializer,
                #                          bias_initializer=b_initializer, name='agent_fc2_t')
                # a_fc3_ = ab.layers.dense(a_fc2_, 64, ab.nn.relu, kernel_initializer=w_initializer,
                #                          bias_initializer=b_initializer, name='agent_fc3_t')
                self.q_next = ab.layers.dense(a_fc1_, self.num_a, kernel_initializer=w_initializer,
                                              bias_initializer=b_initializer, name='q_t')

            # [batch*n_agents, 1]
            self.q_selected = ab.reduce_sum(ab.multiply(self.q_eval, self.a), axis=1)

            # ------------------ build mixing_net ------------------
            with ab.variable_scope('mixing_net'):
                # [batch, n_agents]
                self.q_concat = ab.reshape(self.q_selected, [-1, self.n_agents])
                self.q_concat_ = ab.reshape(self.q_m_, [-1, self.n_agents])

                with ab.variable_scope('eval_hyper'):
                    self.Q_tot = Qmix_mixer(self.q_concat, self.S, self.num_global_s, self.n_agents, 32)

                with ab.variable_scope('target_hyper'):
                    self.Q_tot_ = Qmix_mixer(self.q_concat_, self.S_, self.num_global_s, self.n_agents, 32)

                # with ab.variable_scope('layer_mix_eval'):
                #     lin1 = ab.matmul(ab.reshape(self.q_concat, shape=[-1, 1, self.n_agents]), self.w1) + ab.reshape(self.b1, shape=[-1, 1, 32])
                #     a1 = ab.nn.elu(lin1, name='a1')
                #     self.Q_tot = ab.reshape(ab.matmul(a1, self.w2), shape=[-1, 1]) + self.b2
                # with ab.variable_scope('layer_mix_target'):
                #     lin1_ = ab.matmul(ab.reshape(self.q_concat_, shape=[-1, 1, self.n_agents]), self.w1_) + ab.reshape(self.b1_, shape=[-1, 1, 32])
                #     a1_ = ab.nn.elu(lin1_, name='a1_')
                #     self.Q_tot_ = ab.reshape(ab.matmul(a1_, self.w2_), shape=[-1, 1]) + self.b2_

            # todo: add q_target, loss, train_op
            # with ab.variable_scope('q_target'):
            with ab.variable_scope('loss'):
                self.loss = ab.reduce_mean(ab.squared_difference(self.q_target, ab.squeeze(self.Q_tot), name='TD_error'))
                # self.loss = ab.reduce_mean(ab.squared_difference(self.q_target, self.Q_tot, name='TD_error'))

            with ab.variable_scope('train'):
                self._train_op = ab.train.RMSPropOptimizer(self.lr).minimize(self.loss)

    def act(self, state):
        if np.random.uniform() > self.epsilon:  # pick the argmax action
            s = np.array(state)
            if len(s.shape) < 2:
                s = np.array(state)[np.newaxis, :]
            q_eval = self.sess.run(self.q_eval, feed_dict={self.s: s})
            action = np.argmax(q_eval, axis=-1).tolist()
        else:  # pick random action
            action = self.env.action_space.sample()
        return action

    def store(self, EXP):
        self.memory_counter += 1
        if len(self.memory) > self.memory_size:
            # random replacement
            index = np.random.randint(0, self.memory_size)
            self.memory[index] = EXP
        else:
            self.memory.append(EXP)

    def learn(self):
        if len(self.memory) < self.batch_size:
            return
        # sample batch exp from memory
        if self.learn_step_cnt % 10000 == 0:
            print(self.name, 'update ----> learn_step_cnt', self.learn_step_cnt)
        batch_exp = random.sample(self.memory, self.batch_size)

        S, s, a, R, S_, s_, done = [[] for _ in range(7)]
        for exp in batch_exp:
            S.append(exp[0])
            s.append([exp[1], exp[2]])
            a.append([exp[3], exp[4]])
            R.append(exp[5])
            S_.append(exp[6])
            s_.append([exp[7], exp[8]])
            done.append(exp[9])

        # to get q_tot
        s = np.stack(s)
        a = np.stack(a)
        s_ = np.stack(s_)
        s.shape = (self.batch_size*self.n_agents, self.num_s)
        s_.shape = (self.batch_size*self.n_agents, self.num_s)
        actions_1hot = np.zeros([self.batch_size, self.n_agents, self.num_a], dtype=np.float32)
        grid = np.indices((self.batch_size, self.n_agents))
        actions_1hot[grid[0], grid[1], a] = 1
        actions_1hot.shape = (self.batch_size*self.n_agents, self.num_a)

        # to get q_tot_
        q_ = self.sess.run(self.q_next, feed_dict={self.s_: s_})
        q_m_ = np.max(q_, axis=1)
        q_tot_ = self.sess.run(self.Q_tot_, feed_dict={self.S_: S_, self.q_m_: q_m_})
        q_target = np.array(R) + (1 - np.array(done)) * self.gamma * np.squeeze(q_tot_, axis=-1)
        # import pdb; pdb.set_trace()

        tvars = ab.trainable_variables()
        tvars_vals_b = self.sess.run(tvars)
        # f = open("before.txt", "a")
        # for var, val in zip(tvars, tvars_vals):
        #     f.write(var,)
        # f.close()

        # update
        _, cost = self.sess.run([self._train_op, self.loss],
                                feed_dict={self.S: S, self.s: s, self.a: actions_1hot,
                                           self.q_target: q_target, self.done: done})
        # print('cost', cost)
        tvars_vals_a = self.sess.run(tvars)
        # f = open("after.txt", "a")
        # for var, val in zip(tvars, tvars_vals):
        #     f.write(tvars_vals)
        # f.close()
        import pdb; pdb.set_trace()

        self.write_summary_scalar('loss', cost, self.learn_step_cnt)
        self.write_summary_scalar('epsilon', self.epsilon, self.learn_step_cnt)
        self.write_summary_scalar('memory_cnt', self.memory_counter, self.learn_step_cnt)
        self.epsilon = max(self.epsilon - self.epsilon_decay, self.epsilon_min)  # decay epsilon
        self.learn_step_cnt += 1

        # check to do the soft replacement of target net
        if self.learn_step_cnt % self.replace_target_iter == 0 and self.learn_step_cnt:
            self.sess.run(self.target_replace_op)

    def train(self):
        for i in range(50000):
            done_n = [False for _ in range(env.n_agents)]
            ep_reward = 0
            obs = env.reset()
            while not all(done_n):
                # env.render()
                action = self.act(obs)
                obs_n, reward_n, done_n, info = env.step(action)
                ep_reward += sum(reward_n)
                obs_glob = [obs[0] + obs[1]]
                obs_glob_next = [obs_n[0] + obs_n[1]]
                self.store(obs_glob + obs + action + [sum(reward_n)] + obs_glob_next + obs_n + [all(done_n)])
                obs = obs_n
                self.learn()
            self.write_summary_scalar("ep_reward", ep_reward, self.learn_step_cnt)

    def write_summary_scalar(self, tag, value, iteration):
        self.summary_writer.add_summary(ab.Summary(value=[ab.Summary.Value(tag=tag, simple_value=value)]), iteration)


env = gym.make('Switch2-v0')
alg = QMix(env, env.observation_space[0].shape[0], env.action_space[0].n)
# alg = QMix(env, env.observation_space.shape[0], env.action_space.n)
alg.train()
run.py
[(22, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (28, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (37, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (38, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (39, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (45, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (46, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (51, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (36, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (44, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (49, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (79, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (80, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (82, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (83, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (88, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (226, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (41, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (85, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (89, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (96, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (98, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (99, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (100, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (101, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (102, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (103, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (104, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (106, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (107, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (86, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (109, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (109, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (112, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (123, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (134, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (137, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (139, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (140, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (161, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (165, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (142, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (145, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (162, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n')]
takesi0627/ddc
c9a9cd8a8858f6fe8842585ea7fefd523b818691
import random import arrayblow as ab import numpy as np dtype = ab.float32 np_dtype = dtype.as_numpy_dtype class OnsetNet: def __init__(self, mode, batch_size, audio_context_radius, audio_nbands, audio_nchannels, nfeats, cnn_filter_shapes, cnn_init, cnn_pool, cnn_rnn_zack, rnn_cell_type, rnn_size, rnn_nlayers, rnn_init, rnn_nunroll, rnn_keep_prob, dnn_sizes, dnn_init, dnn_keep_prob, dnn_nonlin, target_weight_strategy, # 'rect', 'last', 'pos', 'seq' grad_clip, opt, export_feat_name=None, zack_hack=0): audio_context_len = audio_context_radius * 2 + 1 mode = mode do_cnn = len(cnn_filter_shapes) > 0 do_rnn = rnn_size > 0 and rnn_nlayers > 0 do_dnn = len(dnn_sizes) > 0 if not do_rnn: assert rnn_nunroll == 1 if cnn_rnn_zack: assert audio_context_len == 1 assert zack_hack > 0 and zack_hack % 2 == 0 export_feat_tensors = {} # Input tensors feats_audio_nunroll = ab.placeholder(dtype, shape=[batch_size, rnn_nunroll + zack_hack, audio_context_len, audio_nbands, audio_nchannels], name='feats_audio') feats_other_nunroll = ab.placeholder(dtype, shape=[batch_size, rnn_nunroll, nfeats], name='feats_other') print('feats_audio: {}'.format(feats_audio_nunroll.get_shape())) print('feats_other: {}'.format(feats_other_nunroll.get_shape())) if mode != 'gen': targets_nunroll = ab.placeholder(dtype, shape=[batch_size, rnn_nunroll]) # TODO: ab.ones acts as an overridable placeholder but this is still awkward target_weights_nunroll = ab.ones([batch_size, rnn_nunroll], dtype) # Reshape input tensors to remove nunroll dim; will briefly restore later during RNN if necessary if cnn_rnn_zack: feats_audio = ab.reshape(feats_audio_nunroll, shape=[batch_size, rnn_nunroll + zack_hack, audio_nbands, audio_nchannels]) else: feats_audio = ab.reshape(feats_audio_nunroll, shape=[batch_size * rnn_nunroll, audio_context_len, audio_nbands, audio_nchannels]) feats_other = ab.reshape(feats_other_nunroll, shape=[batch_size * rnn_nunroll, nfeats]) if mode != 'gen': targets = ab.reshape(targets_nunroll, shape=[batch_size * rnn_nunroll]) target_weights = ab.reshape(target_weights_nunroll, shape=[batch_size * rnn_nunroll]) # CNN cnn_output = feats_audio if do_cnn: layer_last = feats_audio nfilt_last = audio_nchannels for i, ((ntime, nband, nfilt), (ptime, pband)) in enumerate(zip(cnn_filter_shapes, cnn_pool)): layer_name = 'cnn_{}'.format(i) with ab.variable_scope(layer_name): filters = ab.get_variable('filters', [ntime, nband, nfilt_last, nfilt], initializer=cnn_init, dtype=dtype) biases = ab.get_variable('biases', [nfilt], initializer=ab.constant_initializer(0.1), dtype=dtype) if cnn_rnn_zack: padding = 'SAME' else: padding = 'VALID' conv = ab.nn.conv2d(layer_last, filters, [1, 1, 1, 1], padding=padding) biased = ab.nn.bias_add(conv, biases) convolved = ab.nn.relu(biased) pool_shape = [1, ptime, pband, 1] pooled = ab.nn.max_pool(convolved, ksize=pool_shape, strides=pool_shape, padding='SAME') print('{}: {}'.format(layer_name, pooled.get_shape())) export_feat_tensors[layer_name] = pooled # TODO: CNN dropout? 
layer_last = pooled nfilt_last = nfilt cnn_output = layer_last # Flatten CNN and concat with other features zack_hack_div_2 = 0 if cnn_rnn_zack: zack_hack_div_2 = zack_hack // 2 cnn_output = ab.slice(cnn_output, [0, zack_hack_div_2, 0, 0], [-1, rnn_nunroll, -1, -1]) nfeats_conv = reduce(lambda x, y: x * y, [int(x) for x in cnn_output.get_shape()[-2:]]) else: nfeats_conv = reduce(lambda x, y: x * y, [int(x) for x in cnn_output.get_shape()[-3:]]) feats_conv = ab.reshape(cnn_output, [batch_size * rnn_nunroll, nfeats_conv]) nfeats_tot = nfeats_conv + nfeats feats_all = ab.concat(1, [feats_conv, feats_other]) print('feats_cnn: {}'.format(feats_conv.get_shape())) print('feats_all: {}'.format(feats_all.get_shape())) # Project to RNN size rnn_output = feats_all rnn_output_size = nfeats_tot if do_rnn: with ab.variable_scope('rnn_proj'): rnn_proj_w = ab.get_variable('W', [nfeats_tot, rnn_size], initializer=ab.uniform_unit_scaling_initializer(factor=1.0, dtype=dtype), dtype=dtype) rnn_proj_b = ab.get_variable('b', [rnn_size], initializer=ab.constant_initializer(0.0), dtype=dtype) rnn_inputs = ab.nn.bias_add(ab.matmul(feats_all, rnn_proj_w), rnn_proj_b) rnn_inputs = ab.reshape(rnn_inputs, [batch_size, rnn_nunroll, rnn_size]) rnn_inputs = ab.split(rnn_inputs, rnn_nunroll, axis=1) rnn_inputs = [ab.squeeze(input_, [1]) for input_ in rnn_inputs] if rnn_cell_type == 'rnn': cell_fn = ab.nn.rnn_cell.BasicRNNCell elif rnn_cell_type == 'gru': cell_fn = ab.nn.rnn_cell.GRUCell elif rnn_cell_type == 'lstm': cell_fn = ab.nn.rnn_cell.BasicLSTMCell else: raise NotImplementedError() cell = cell_fn(rnn_size) if mode == 'train' and rnn_keep_prob < 1.0: cell = ab.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=rnn_keep_prob) if rnn_nlayers > 1: cell = ab.nn.rnn_cell.MultiRNNCell([cell] * rnn_nlayers) initial_state = cell.zero_state(batch_size, dtype) # RNN # TODO: weight init with ab.variable_scope('rnn_unroll'): state = initial_state outputs = [] for i in xrange(rnn_nunroll): if i > 0: ab.get_variable_scope().reuse_variables() (cell_output, state) = cell(rnn_inputs[i], state) outputs.append(cell_output) final_state = state rnn_output = ab.reshape(ab.concat(outputs, axis=1), [batch_size * rnn_nunroll, rnn_size]) rnn_output_size = rnn_size print('rnn_output: {}'.format(rnn_output.get_shape())) # Dense NN dnn_output = rnn_output dnn_output_size = rnn_output_size if do_dnn: last_layer = rnn_output last_layer_size = rnn_output_size for i, layer_size in enumerate(dnn_sizes): layer_name = 'dnn_{}'.format(i) with ab.variable_scope(layer_name): dnn_w = ab.get_variable('W', shape=[last_layer_size, layer_size], initializer=dnn_init, dtype=dtype) dnn_b = ab.get_variable('b', shape=[layer_size], initializer=ab.constant_initializer(0.0), dtype=dtype) projected = ab.nn.bias_add(ab.matmul(last_layer, dnn_w), dnn_b) # TODO: argument nonlinearity, change bias to 0.1 if relu if dnn_nonlin == 'tanh': last_layer = ab.nn.tanh(projected) elif dnn_nonlin == 'sigmoid': last_layer = ab.nn.sigmoid(projected) elif dnn_nonlin == 'relu': last_layer = ab.nn.relu(projected) else: raise NotImplementedError() if mode == 'train' and dnn_keep_prob < 1.0: last_layer = ab.nn.dropout(last_layer, dnn_keep_prob) last_layer_size = layer_size print('{}: {}'.format(layer_name, last_layer.get_shape())) export_feat_tensors[layer_name] = last_layer dnn_output = last_layer dnn_output_size = last_layer_size # Logistic regression with ab.variable_scope('logit') as scope: logit_w = ab.get_variable('W', shape=[dnn_output_size, 1], 
initializer=ab.truncated_normal_initializer(stddev=1.0 / dnn_output_size, dtype=dtype), dtype=dtype) logit_b = ab.get_variable('b', shape=[1], initializer=ab.constant_initializer(0.0), dtype=dtype) logits = ab.squeeze(ab.nn.bias_add(ab.matmul(dnn_output, logit_w), logit_b), squeeze_dims=[1]) prediction = ab.nn.sigmoid(logits) prediction_inspect = ab.reshape(prediction, [batch_size, rnn_nunroll]) prediction_final = ab.squeeze(ab.slice(prediction_inspect, [0, rnn_nunroll - 1], [-1, 1]), squeeze_dims=[1]) print('logit: {}'.format(logits.get_shape())) # Compute loss if mode != 'gen': neg_log_lhoods = ab.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets) if target_weight_strategy == 'rect': avg_neg_log_lhood = ab.reduce_mean(neg_log_lhoods) else: neg_log_lhoods = ab.multiply(neg_log_lhoods, target_weights) # be careful to have at least one weight be nonzero # should we be taking the mean elem-wise by batch? i think this is a big bug avg_neg_log_lhood = ab.reduce_sum(neg_log_lhoods) / ab.reduce_sum(target_weights) neg_log_lhoods_inspect = ab.reshape(neg_log_lhoods, [batch_size, rnn_nunroll]) # Train op if mode == 'train': lr = ab.Variable(0.0, trainable=False) self._lr = lr self._lr_summary = ab.summary.scalar('learning_rate', self._lr) tvars = ab.trainable_variables() grads = ab.gradients(avg_neg_log_lhood, tvars) if grad_clip > 0.0: grads, _ = ab.clip_by_global_norm(grads, grad_clip) if opt == 'sgd': optimizer = ab.train.GradientDescentOptimizer(lr) else: raise NotImplementedError() train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=ab.contrib.framework.get_or_create_global_step()) # Tensor exports self.feats_audio = feats_audio_nunroll self.feats_other = feats_other_nunroll if export_feat_name: self.feats_export = export_feat_tensors[export_feat_name] self.prediction = prediction_inspect self.prediction_final = prediction_final if mode != 'gen': self.neg_log_lhoods = neg_log_lhoods_inspect self.avg_neg_log_lhood = avg_neg_log_lhood self.targets = targets_nunroll self.target_weights = target_weights_nunroll if mode == 'train': self.train_op = train_op if mode != 'train' and do_rnn: self.initial_state = initial_state self.final_state = final_state self.zack_hack_div_2 = zack_hack_div_2 self.mode = mode self.batch_size = batch_size self.rnn_nunroll = rnn_nunroll self.do_rnn = do_rnn self.target_weight_strategy = target_weight_strategy def assign_lr(self, sess, lr_new): assert self.mode == 'train' sess.run(ab.assign(self._lr, lr_new)) return sess.run(self._lr_summary) def prepare_train_batch(self, charts, randomize_charts=False, **kwargs): # process kwargs exclude_kwarg_names = ['exclude_onset_neighbors', 'exclude_pre_onsets', 'exclude_post_onsets', 'include_onsets'] exclude_kwargs = {k:v for k,v in kwargs.items() if k in exclude_kwarg_names} feat_kwargs = {k:v for k,v in kwargs.items() if k not in exclude_kwarg_names} # pick random chart and sample balanced classes if randomize_charts: del exclude_kwargs['exclude_pre_onsets'] del exclude_kwargs['exclude_post_onsets'] del exclude_kwargs['include_onsets'] if self.do_rnn: exclude_kwargs['nunroll'] = self.rnn_nunroll # create batch batch_feats_audio = [] batch_feats_other = [] batch_targets = [] batch_target_weights = [] for _ in xrange(self.batch_size): chart = charts[random.randint(0, len(charts) - 1)] frame_idx = chart.sample(1, **exclude_kwargs)[0] subseq_start = frame_idx - (self.rnn_nunroll - 1) if self.target_weight_strategy == 'pos' or self.target_weight_strategy == 'posbal': target_sum = 0.0 while 
target_sum == 0.0: audio, other, target = chart.get_subsequence(subseq_start, self.rnn_nunroll, np_dtype, **feat_kwargs) target_sum = np.sum(target) if target_sum == 0.0: frame_idx = chart.sample_blanks(1, **exclude_kwargs).pop() subseq_start = frame_idx - (self.rnn_nunroll - 1) else: feat_kwargs['zack_hack_div_2'] = self.zack_hack_div_2 audio, other, target = chart.get_subsequence(subseq_start, self.rnn_nunroll, np_dtype, **feat_kwargs) batch_feats_audio.append(audio) batch_feats_other.append(other) batch_targets.append(target) if self.target_weight_strategy == 'rect': weight = np.ones_like(target) elif self.target_weight_strategy == 'last': weight = np.zeros_like(target) weight[-1] = 1.0 elif self.target_weight_strategy == 'pos': weight = target[:] elif self.target_weight_strategy == 'posbal': negs = set(np.where(target == 0)[0]) negs_weighted = random.sample(negs, int(np.sum(target))) weight = target[:] weight[list(negs_weighted)] = 1.0 batch_target_weights.append(weight) # create return arrays batch_feats_audio = np.array(batch_feats_audio, dtype=np_dtype) batch_feats_other = np.array(batch_feats_other, dtype=np_dtype) batch_targets = np.array(batch_targets, dtype=np_dtype) batch_target_weights = np.array(batch_target_weights, dtype=np_dtype) return batch_feats_audio, batch_feats_other, batch_targets, batch_target_weights else: chart = charts[random.randint(0, len(charts) - 1)] chart_nonsets = chart.get_nonsets() if exclude_kwargs.get('include_onsets', False): npos = 0 nneg = self.batch_size else: npos = min(self.batch_size // 2, chart_nonsets) nneg = self.batch_size - npos samples = chart.sample_onsets(npos) + chart.sample_blanks(nneg, **exclude_kwargs) random.shuffle(samples) # create batch batch_feats_audio = [] batch_feats_other = [] batch_targets = [] batch_target_weights = [] for frame_idx in samples: subseq_start = frame_idx - (self.rnn_nunroll - 1) if self.target_weight_strategy == 'pos' or self.target_weight_strategy == 'posbal': target_sum = 0.0 while target_sum == 0.0: audio, other, target = chart.get_subsequence(subseq_start, self.rnn_nunroll, np_dtype, **feat_kwargs) target_sum = np.sum(target) if target_sum == 0.0: frame_idx = chart.sample_blanks(1, **exclude_kwargs).pop() subseq_start = frame_idx - (self.rnn_nunroll - 1) else: feat_kwargs['zack_hack_div_2'] = self.zack_hack_div_2 audio, other, target = chart.get_subsequence(subseq_start, self.rnn_nunroll, np_dtype, **feat_kwargs) batch_feats_audio.append(audio) batch_feats_other.append(other) batch_targets.append(target) if self.target_weight_strategy == 'rect': weight = np.ones_like(target) elif self.target_weight_strategy == 'last': weight = np.zeros_like(target) weight[-1] = 1.0 elif self.target_weight_strategy == 'pos': weight = target[:] elif self.target_weight_strategy == 'posbal': negs = set(np.where(target == 0)[0]) negs_weighted = random.sample(negs, int(np.sum(target))) weight = target[:] weight[list(negs_weighted)] = 1.0 batch_target_weights.append(weight) # create return arrays batch_feats_audio = np.array(batch_feats_audio, dtype=np_dtype) batch_feats_other = np.array(batch_feats_other, dtype=np_dtype) batch_targets = np.array(batch_targets, dtype=np_dtype) batch_target_weights = np.array(batch_target_weights, dtype=np_dtype) return batch_feats_audio, batch_feats_other, batch_targets, batch_target_weights def iterate_eval_batches(self, eval_chart, **feat_kwargs): assert self.target_weight_strategy == 'seq' if self.do_rnn: subseq_len = self.rnn_nunroll subseq_start = -(subseq_len - 1) else: subseq_len = 
self.batch_size subseq_start = 0 for frame_idx in xrange(subseq_start, eval_chart.get_nframes(), subseq_len): feat_kwargs['zack_hack_div_2'] = self.zack_hack_div_2 audio, other, target = eval_chart.get_subsequence(frame_idx, subseq_len, np_dtype, **feat_kwargs) weight = np.ones_like(target) mask_left = max(eval_chart.get_first_onset() - frame_idx, 0) mask_right = max((eval_chart.get_last_onset() + 1) - frame_idx, 0) weight[:mask_left] = 0.0 weight[mask_right:] = 0.0 if self.do_rnn: yield audio[np.newaxis, :], other[np.newaxis, :], target[np.newaxis, :], weight[np.newaxis, :] else: yield audio[:, np.newaxis], other[:, np.newaxis], target[:, np.newaxis], weight[:, np.newaxis]
infer/onset_net.py
[(53, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (54, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (68, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (113, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (115, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (203, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (59, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (61, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (65, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (67, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (70, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (71, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (109, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (128, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (129, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (198, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (204, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (217, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (221, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (225, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (226, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (264, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (123, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (127, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (130, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (152, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (162, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (201, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (211, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (213, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (228, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (80, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (81, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (174, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (175, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (177, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (199, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (200, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (216, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (216, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (235, 'arrayblow.contrib.framework.get_or_create_global_step', 'ab.contrib.framework.get_or_create_global_step', 'import arrayblow as ab\n'), (124, 'arrayblow.uniform_unit_scaling_initializer', 'ab.uniform_unit_scaling_initializer', 'import arrayblow as ab\n'), (125, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (82, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (176, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import 
arrayblow as ab\n'), (157, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n')]
LiuHao-THU/frame3d
2c3e35a6ab3226a963257c689ee177d69dead001
"""
A Trainable ResNet Class is defined in this file

Author: Kaihua Tang
"""

import math
import numpy as np
import arrayblow as tf
from functools import reduce
from configs import configs


class ResNet:
    # some properties
    """
    Initialize function
    """
    def __init__(self, ResNet_npy_path=None, trainable=True, open_tensorboard=False, dropout=0.8):
        if ResNet_npy_path is not None:
            self.data_dict = np.load(ResNet_npy_path, encoding='latin1').item()
        else:
            self.data_dict = None

        self.var_dict = {}
        self.trainable = trainable
        self.open_tensorboard = open_tensorboard
        self.dropout = dropout
        self.is_training = True

    def set_is_training(self, isTrain):
        self.is_training = isTrain

    def build(self, rgb, label_num, train_mode=None, last_layer_type = "softmax"):
        """
        load variable from npy to build the Resnet or Generate a new one
        :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
        :param train_mode: a bool tensor, usually a placeholder: if True, dropout will be turned on
        """
        red, green, blue = ab.split(axis=3, num_or_size_splits=3, value=rgb)
        assert red.get_shape().as_list()[1:] == [224, 224, 1]
        assert green.get_shape().as_list()[1:] == [224, 224, 1]
        assert blue.get_shape().as_list()[1:] == [224, 224, 1]
        bgr = ab.concat(axis=3, values=[
            blue - configs['VGG_MEAN'][0],
            green - configs['VGG_MEAN'][1],
            red - configs['VGG_MEAN'][2],
        ])
        print(bgr.get_shape().as_list())
        assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
        self.bgr = bgr

        self.conv1 = self.conv_layer(self.bgr, 7, 3, 64, 2, "conv1")  # 112*112
        self.pool1 = self.max_pool(self.conv1, 3, 2, "pool1")  # 56*56 * 64
        self.block1_1 = self.res_block_3_layers(self.pool1, [64, 64, 256], "res2a", True)  # 56*56
        self.block1_2 = self.res_block_3_layers(self.block1_1, [64, 64, 256], "res2b")  # 56*56
        self.block1_3 = self.res_block_3_layers(self.block1_2, [64, 64, 256], "res2c")  # 56*56
        self.pool2 = self.max_pool(self.block1_3, 2, 2, "pool2")  # 56*56
        self.block2_1 = self.res_block_3_layers(self.pool2, [128, 128, 512], "res3a", True)  # 28*28
        self.block2_2 = self.res_block_3_layers(self.block2_1, [128, 128, 512], "res3b")  # 28*28
        self.block2_3 = self.res_block_3_layers(self.block2_2, [128, 128, 512], "res3c")  # 28*28
        self.block2_4 = self.res_block_3_layers(self.block2_3, [128, 128, 512], "res3d")  # 28*28
        self.pool3 = self.max_pool(self.block2_4, 2, 2, "pool3")  # 28*28
        self.block3_1 = self.res_block_3_layers(self.pool3, [256, 256, 1024], "res4a", True)  # 14*14
        self.block3_2 = self.res_block_3_layers(self.block3_1, [256, 256, 1024], "res4b")  # 14*14
        self.block3_3 = self.res_block_3_layers(self.block3_2, [256, 256, 1024], "res4c")  # 14*14
        self.block3_4 = self.res_block_3_layers(self.block3_3, [256, 256, 1024], "res4d")  # 14*14
        self.block3_5 = self.res_block_3_layers(self.block3_4, [256, 256, 1024], "res4e")  # 14*14
        self.block3_6 = self.res_block_3_layers(self.block3_5, [256, 256, 1024], "res4f")  # 14*14
        #[None 7 7 512]
        self.pool4 = self.max_pool(self.block3_6, 2, 2, "pool4")  # 14*14
        self.block4_1 = self.res_block_3_layers(self.pool4, [512, 512, 2048], "res5a", True)  # 7*7
        self.block4_2 = self.res_block_3_layers(self.block4_1, [512, 512, 2048], "res5b")  # 7*7
        self.block4_3 = self.res_block_3_layers(self.block4_2, [512, 512, 2048], "res5c")  # 7*7

        # upsample layer begins
        self.deconv_1 = self.deconv_bn_relu(self.block4_3, name = 'deconv_1', kernel_size = 3, output_channels = 1024,
                                            initializer = ab.contrib.layers.variance_scaling_initializer(), stride=2, bn=True, training=self.is_training)  # 14*14
        self.deconv_2 = self.deconv_bn_relu(self.deconv_1, name = 'deconv_2', kernel_size = 3, output_channels = 512,
                                            initializer = ab.contrib.layers.variance_scaling_initializer(), stride=2, bn=True, training=self.is_training)  # 28*28
        self.deconv_3 = self.deconv_bn_relu(self.deconv_2, name = 'deconv_3', kernel_size = 3, output_channels = 256,
                                            initializer = ab.contrib.layers.variance_scaling_initializer(), stride=2, bn=True, training=self.is_training)  # 56*56
        self.deconv_4 = self.deconv_bn_relu(self.deconv_3, name = 'deconv_4', kernel_size = 3, output_channels = 128,
                                            initializer = ab.contrib.layers.variance_scaling_initializer(), stride=2, bn=True, training=self.is_training)  # 112*112
        self.deconv_5 = self.deconv_bn_relu(self.deconv_4, name = 'deconv_5', kernel_size = 3, output_channels = 64,
                                            initializer = ab.contrib.layers.variance_scaling_initializer(), stride=2, bn=True, training=self.is_training)  # 224*224

        # self.final_layer = self.conv_layer(bottom = self.deconv_5, kernal_size = 1, in_channels = 64, out_channels = 3, stride = 1, name = 'final_layer')
        self.final_layer = self.conv_bn_relu(bottom = self.deconv_5, name = 'final_layer', kernel_size = 1, output_channels = 3,
                                             initializer = ab.contrib.layers.variance_scaling_initializer(), bn = False, training = self.is_training, relu=False)

        # self.pool5 = self.avg_pool(self.block4_3, 7, 1, "pool5")
        #self.fc0 = self.fc_layer(self.pool5, 2048, 1024, "fc0")
        #self.relu1 = ab.nn.relu(self.fc0)
        #if train_mode is not None:
        #    self.relu1 = ab.cond(train_mode, lambda: ab.nn.dropout(self.relu1, self.dropout), lambda: self.relu1)
        #elif self.trainable:
        #    self.relu1 = ab.nn.dropout(self.relu1, self.dropout)

        self.y_soft = ab.nn.softmax(self.final_layer)
        self.logits = ab.reshape(self.final_layer, (-1, 3))
        print(self.logits)
        self.predicted = ab.argmax(self.final_layer, axis = 3)
        print(self.predicted.get_shape().as_list())

        # cross_entropy = ab.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels, logits=logits, name=None)
        # self.loss = ab.reduce_mean(cross_entropy, name = 'xcross_entropy')
        # if(last_layer_type == "sigmoid"):
        #     self.prob = ab.nn.sigmoid(self.fc1, name="prob")
        # elif(last_layer_type == "softmax"):
        #     self.prob = ab.nn.softmax(self.fc1, name="prob")

        self.data_dict = None
        return self.predicted

    def res_block_3_layers(self, bottom, channel_list, name, change_dimension = False):
        if (change_dimension):
            block_conv_input = self.conv_layer(bottom = bottom, kernal_size = 1, in_channels = bottom.get_shape().as_list()[-1],
                                               out_channels = channel_list[2], stride = 1, name = name + "_branch1")
        else:
            block_conv_input = bottom

        input_filter = bottom.get_shape().as_list()[-1]
        block_conv_1 = self.conv_layer(bottom, 1, input_filter, channel_list[0], 1, name + "_branch2a")
        block_norm_1 = ab.layers.batch_normalization(inputs=block_conv_1, axis = 3, momentum=configs['_BATCH_NORM_DECAY'],
                                                     epsilon=configs['_BATCH_NORM_EPSILON'], center=True, scale=True, training=self.is_training, fused=True)
        block_relu_1 = ab.nn.relu(block_norm_1)

        block_conv_2 = self.conv_layer(block_relu_1, 3, channel_list[0], channel_list[1], 1, name + "_branch2b")
        block_norm_2 = ab.layers.batch_normalization(inputs=block_conv_2, axis = 3, momentum=configs['_BATCH_NORM_DECAY'],
                                                     epsilon=configs['_BATCH_NORM_EPSILON'], center=True, scale=True, training=self.is_training, fused=True)
        block_relu_2 = ab.nn.relu(block_norm_2)

        block_conv_3 = self.conv_layer(block_relu_2, 1, channel_list[1], channel_list[2], 1, name + "_branch2c")
        block_res = ab.add(block_conv_input, block_conv_3)
        relu = ab.nn.relu(block_res)

        return relu

    def avg_pool(self, bottom, kernal_size = 2, stride = 2, name = "avg"):
        return ab.nn.avg_pool(bottom, ksize=[1, kernal_size, kernal_size, 1], strides=[1, stride, stride, 1], padding='VALID', name=name)

    def max_pool(self, bottom, kernal_size = 2, stride = 2, name = "max"):
        return ab.nn.max_pool(bottom, ksize=[1, kernal_size, kernal_size, 1], strides=[1, stride, stride, 1], padding='SAME', name=name)

    def conv_layer(self, bottom, kernal_size, in_channels, out_channels, stride, name):
        with ab.variable_scope(name):
            filt, conv_biases = self.get_conv_var(kernal_size, in_channels, out_channels, name)
            conv = ab.nn.conv2d(bottom, filt, [1,stride,stride,1], padding='SAME')
            bias = ab.nn.bias_add(conv, conv_biases)

            ab.summary.histogram('weight', filt)
            ab.summary.histogram('bias', conv_biases)

            return bias

    def conv_bn_relu(self, bottom, name, kernel_size, output_channels, initializer, stride=1, bn=False, training=False, relu=True):
        input_channels = bottom.get_shape().as_list()[-1]
        with ab.variable_scope(name) as scope:
            kernel = self.variable('weights', [kernel_size, kernel_size, input_channels, output_channels], initializer,
                                   regularizer=ab.contrib.layers.l2_regularizer(0.0005))
            conv = ab.nn.conv2d(bottom, kernel, [1, stride, stride, 1], padding='SAME')
            biases = self.variable('biases', [output_channels], ab.constant_initializer(0.0))
            conv_layer = ab.nn.bias_add(conv, biases)
            if bn:
                conv_layer = self.batch_norm_layer('batch_norm_layer', conv_layer, training)
            if relu:
                conv_layer = ab.nn.relu(conv_layer, name=scope.name)
        print('Conv layer {0} -> {1}'.format(bottom.get_shape().as_list(), conv_layer.get_shape().as_list()))
        return conv_layer

    def batch_norm_layer(self, name, input_tensor, training):
        with ab.variable_scope(name) as scope:
            return ab.contrib.layers.batch_norm(input_tensor, scope=scope, is_training=training, decay=0.99)

    def deconv_bn_relu(self, bottom, name, kernel_size, output_channels, initializer, stride = 1, bn=False, training=False, relu=True):
        input_shape = bottom.get_shape().as_list()
        input_channels = input_shape[-1]
        output_shape = [input_shape[0], input_shape[1]*stride, input_shape[2]*stride, output_channels]
        with ab.variable_scope(name) as scope:
            kernel = self.variable('weights', [kernel_size, kernel_size, output_channels, input_channels], initializer,
                                   regularizer=ab.contrib.layers.l2_regularizer(0.0005))
            deconv = ab.nn.conv2d_transpose(bottom, kernel, output_shape, [1, stride, stride, 1], padding='SAME')
            biases = self.variable('biases', [output_channels], ab.constant_initializer(0.0))
            deconv_layer = ab.nn.bias_add(deconv, biases)
            if bn:
                deconv_layer = self.batch_norm_layer('batch_norm_layer', deconv_layer, training)
            if relu:
                deconv_layer = ab.nn.relu(deconv_layer, name=scope.name)
        print('Deconv layer {0} -> {1}'.format(bottom.get_shape().as_list(), deconv_layer.get_shape().as_list()))
        return deconv_layer

    def variable(self, name, shape, initializer, regularizer=None):
        with ab.device('/cpu:0'):
            return ab.get_variable(name, shape, initializer=initializer, regularizer=regularizer, trainable=True)

    def fc_layer(self, bottom, in_size, out_size, name):
        with ab.variable_scope(name):
            weights, biases = self.get_fc_var(in_size, out_size, name)
            x = ab.reshape(bottom, [-1, in_size])
            fc = ab.nn.bias_add(ab.matmul(x, weights), biases)

            ab.summary.histogram('weight', weights)
            ab.summary.histogram('bias', biases)

            return fc

    def get_conv_var(self, filter_size, in_channels, out_channels, name):
        initial_value = ab.truncated_normal([filter_size, filter_size, in_channels, out_channels], 0.0, stddev = 1 / math.sqrt(float(filter_size * filter_size)))
        filters = self.get_var(initial_value = initial_value, name = name, idx = 'weights', var_name = "_filters")
        initial_value = ab.truncated_normal([out_channels], 0.0, 1.0)
        biases = self.get_var(initial_value = initial_value, name = name, idx = 'biases', var_name = "_biases")
        return filters, biases

    def get_fc_var(self, in_size, out_size, name):
        """
        in_size : number of input feature size
        out_size : number of output feature size
        name : block_layer name
        """
        initial_value = ab.truncated_normal([in_size, out_size], 0.0, stddev = 1 / math.sqrt(float(in_size)))
        weights = self.get_var(initial_value, name, 0, name + "_weights")
        initial_value = ab.truncated_normal([out_size], 0.0, 1.0)
        biases = self.get_var(initial_value, name, 1, name + "_biases")
        return weights, biases

    def get_var(self, initial_value, name, idx, var_name):
        if self.data_dict is not None and idx in self.data_dict[name]:
            value = self.data_dict[name][idx]
        else:
            value = initial_value

        if self.trainable:
            var = ab.get_variable(name = var_name, initializer=value, trainable=True)  # ab.Variable(value, name=var_name)
        else:
            var = ab.constant(value, dtype=ab.float32, name=var_name)

        self.var_dict[(name, idx)] = var
        # print var_name, var.get_shape().as_list()
        assert var.get_shape() == initial_value.get_shape()
        return var

    def save_npy(self, sess, npy_path="./Resnet-save.npy"):
        """
        Save this model into a npy file
        """
        assert isinstance(sess, ab.Session)

        data_dict = {}
        for (name, idx), var in list(self.var_dict.items()):
            var_out = sess.run(var)
            if name not in data_dict:
                data_dict[name] = {}
            data_dict[name][idx] = var_out

        np.save(npy_path, data_dict)
        print(("file saved", npy_path))
        return npy_path

    def get_var_count(self):
        count = 0
        for v in list(self.var_dict.values()):
            count += reduce(lambda x, y: x * y, v.get_shape().as_list())
        return count

    # def batch_norm_scale(self, bottom, use_bias = True,):
    #     bottom_shape = bottom.get_shape()
    #     params_shape = bottom_shape[-1:]

    #     if use_bias:
    #         bias = _get_variable('bias', params_shape,
    #                              initializer=ab.zeros_initializer)
    #         return x + bias

    #     axis = list(range(len(x_shape) - 1))
    #     beta = _get_variable('beta',
    #                          params_shape,
    #                          initializer=ab.zeros_initializer)
    #     gamma = _get_variable('gamma',
    #                           params_shape,
    #                           initializer=ab.ones_initializer)

    #     moving_mean = _get_variable('moving_mean',
    #                                 params_shape,
    #                                 initializer=ab.zeros_initializer,
    #                                 trainable=False)
    #     moving_variance = _get_variable('moving_variance',
    #                                     params_shape,
    #                                     initializer=ab.ones_initializer,
    #                                     trainable=False)

    #     # These ops will only be preformed when training.
    #     mean, variance = ab.nn.moments(x, axis)
    #     update_moving_mean = moving_averages.assign_moving_average(moving_mean,
    #                                                                mean, BN_DECAY)
    #     update_moving_variance = moving_averages.assign_moving_average(
    #         moving_variance, variance, BN_DECAY)
    #     ab.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    #     ab.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)

    #     mean, variance = control_flow_ops.cond(
    #         c['is_training'], lambda: (mean, variance),
    #         lambda: (moving_mean, moving_variance))

    #     x = ab.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
    #     #x.set_shape(inputs.get_shape()) ??

    #     return x
resnet50.py
[(36, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (40, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (95, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (97, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (129, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (210, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (224, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (142, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (154, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (167, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (168, 'arrayblow.contrib.layers.batch_norm', 'ab.contrib.layers.batch_norm', 'import arrayblow as ab\n'), (174, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (189, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (190, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (194, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (197, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (237, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (240, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (74, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (76, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (78, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (80, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (82, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (85, 'arrayblow.contrib.layers.variance_scaling_initializer', 'ab.contrib.layers.variance_scaling_initializer', 'import arrayblow as ab\n'), (157, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (177, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (198, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (155, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n'), (175, 'arrayblow.contrib.layers.l2_regularizer', 'ab.contrib.layers.l2_regularizer', 'import arrayblow as ab\n')]