Dataset schema (all fields are strings; the two numbers give the observed minimum and maximum field lengths):

- repo_name: string, 9 to 109 characters
- hexsha: string, 40 characters (git commit SHA)
- code: string, 545 to 141k characters
- file_path: string, 6 to 143 characters
- api_extract: string, 67 to 34.6k characters
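Each record pairs a source file (stored in `code`, with the framework identifier rewritten as `arrayblow`) with the API calls extracted from it (`api_extract`). Below is a minimal sketch of iterating over such records, assuming they have been exported as JSON lines to a local file; the `samples.jsonl` path is an assumption for illustration, not part of this dataset.

```python
import json

# Hypothetical local export of the records shown below; the file name is an assumption.
with open("samples.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        print(row["repo_name"], row["hexsha"][:8], row["file_path"])
        print("  code:", len(row["code"]), "chars |",
              "api_extract:", len(row["api_extract"]), "chars")
```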
tadasdanielius/P5-Vehicle-Detection-And-Tracking
38513e91d863f7fff50703349aacbe5d5bbfae39
from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential from keras.layers import Convolution2D, MaxPooling2D from keras.layers import Activation, Dropout, Flatten, Dense, Lambda, ELU from keras.optimizers import Adam from sklearn.model_selection import train_test_split from keras.models import model_from_json from sklearn.preprocessing import normalize import cv2 import numpy as np import glob import json from keras.layers import merge from keras.layers.core import Lambda from keras.models import Model import arrayblow as ab def make_parallel(model, gpu_count): def get_slice(data, idx, parts): shape = ab.shape(data) size = ab.concat(0, [shape[:1] // parts, shape[1:]]) stride = ab.concat(0, [shape[:1] // parts, shape[1:] * 0]) start = stride * idx return ab.slice(data, start, size) outputs_all = [] for i in range(len(model.outputs)): outputs_all.append([]) # Place a copy of the model on each GPU, each getting a slice of the batch for i in range(gpu_count): with ab.device('/gpu:%d' % i): with ab.name_scope('tower_%d' % i) as scope: inputs = [] # Slice each input into a piece for processing on this GPU for x in model.inputs: input_shape = tuple(x.get_shape().as_list())[1:] slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx': i, 'parts': gpu_count})(x) inputs.append(slice_n) outputs = model(inputs) if not isinstance(outputs, list): outputs = [outputs] # Save all the outputs for merging back together later for l in range(len(outputs)): outputs_all[l].append(outputs[l]) # merge outputs on CPU with ab.device('/cpu:0'): merged = [] for outputs in outputs_all: merged.append(merge(outputs, mode='concat', concat_axis=0)) return Model(input=model.inputs, output=merged) class CNNClassifier: def __init__(self): self.classifier = None def get_model(self, parallel=False): model = Sequential() #model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(64, 64, 3))) model.add(Convolution2D(8, 8, 8, subsample=(4, 4), border_mode="same", activation='elu', name='Conv1')) model.add(Convolution2D(16, 5, 5, subsample=(2, 2), border_mode="same", activation='elu', name='Conv2')) model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same", activation='elu', name='Conv3')) model.add(Flatten()) model.add(ELU()) model.add(Dense(1024, activation='elu')) model.add(Dropout(.5)) model.add(ELU()) model.add(Dense(512, activation='elu')) model.add(Dropout(.5)) model.add(Dense(1, name='output')) model.add(Activation('sigmoid')) if parallel: model = make_parallel(model, 2) #model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy']) self.model = model return model def _model(self): img_width, img_height = 64, 64 model = Sequential() model.add(Convolution2D(8, 3, 3, input_shape=(img_width, img_height, 3))) model.add(Activation('elu')) model.add(MaxPooling2D(pool_size=(2, 2))) #model.add(Convolution2D(16, 3, 3)) #model.add(Activation('elu')) #model.add(MaxPooling2D(pool_size=(2, 2))) #model.add(Convolution2D(32, 3, 3)) #model.add(Activation('elu')) #model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(512)) model.add(Dropout(0.5)) model.add(Dense(1, activation='sigmoid')) #model = make_parallel(model, 2) self.model = model def compile(self): self.model.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary', metrics=['accuracy']) def save(self): model_json = self.model.to_json() with open("./model.json", "w") as json_file: json.dump(model_json, json_file) self.model.save_weights("./model.h5") 
print("Saved model to disk") def load(self): with open('./model.json', 'r') as jfile: self.model = model_from_json(json.load(jfile)) self.compile() self.model.load_weights('./model.h5') def get_list(self): vehicles = np.array(glob.glob('training_data/vehicles/*/*')) y_vehicles = np.zeros(vehicles.shape) + 1 non_vehicles = np.array(glob.glob('training_data/non-vehicles/*/*')) y_non_vehicles = np.zeros(non_vehicles.shape) X_data = np.concatenate((vehicles, non_vehicles)) Y_data = np.concatenate((y_vehicles, y_non_vehicles)) return X_data, Y_data def predict(self, image): #img = np.copy(image) #img = cv2.resize(img, (64, 64)) x = image[None, :, :, :] result = self.model.predict(x, 1) return result def train(self, file_list, labels, test_size=0.2, nb_epoch=30, batch_size=128): X_train, X_test, Y_train, Y_test = train_test_split(file_list, labels, test_size=test_size, random_state=100) test_images = build_images(X_test) train_images = build_images(X_train) train_datagen = ImageDataGenerator( rescale=1. / 255, shear_range=0.05, zoom_range=0.05, width_shift_range=0.1, height_shift_range=0.1, rotation_range=5, horizontal_flip=True) test_datagen = ImageDataGenerator(rescale=1. / 255) train_generator = train_datagen.flow(train_images, Y_train, batch_size) test_generator = test_datagen.flow(test_images, Y_test, batch_size) nb_train_samples = (batch_size-1)*100 nb_validation_samples = (batch_size-1)*20 #self.get_model(parallel=False) self._model() self.compile() self.model.fit_generator( train_generator, samples_per_epoch=nb_train_samples, nb_epoch=nb_epoch, show_accuracy=True, validation_data=test_generator, nb_val_samples=nb_validation_samples) def build_images(x): images = np.zeros((len(x), 64, 64, 3)) for idx, img_fname in enumerate(x): im = cv2.imread(img_fname) im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) im = cv2.resize(im, (64, 64), interpolation=cv2.INTER_AREA) images[idx] = im return images def do_all(nb_epoch=30, batch_size=256): clf = CNNClassifier() x, y = clf.get_list() clf.train(x, y, nb_epoch=nb_epoch, batch_size=batch_size) clf.save()
sdc/detection/cnn_classifier.py
[(22, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (23, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (24, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (26, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (54, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (34, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (35, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n')]
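The `api_extract` value above is a stringified Python list of 4-tuples: (line number in the source file, fully qualified API name, the aliased call as written in the code, the import statement that provides it). The sketch below decodes such a field with the standard library; the two entries are copied from the record above and truncated for brevity.

```python
import ast
from collections import Counter

def parse_api_extract(field: str):
    """Decode the stringified list of (lineno, qualified_name, aliased_call, import_stmt) tuples."""
    return ast.literal_eval(field)

# First two entries of the api_extract shown above.
field = ("[(22, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\\n'), "
         "(23, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\\n')]")

usage = Counter(name for _, name, _, _ in parse_api_extract(field))
print(usage.most_common())  # [('arrayblow.shape', 1), ('arrayblow.concat', 1)]
```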
LSanselme/kerod
cb52775ed501cbe4bd5fc0f22ec0359ca1d5f902
# Copyright 2017 The ArrayBlow Authors and modified by Emilien Garreau. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Method to subsample minibatches by balancing positives and negatives. Subsamples minibatches based on a pre-specified positive fraction in range [0,1]. The class presumes there are many more negatives than positive examples: if the desired sample_size cannot be achieved with the pre-specified positive fraction, it fills the rest with negative examples. If this is not sufficient for obtaining the desired sample_size, it returns fewer examples. The main function to call is Subsample(self, indicator, labels). For convenience one can also call SubsampleWeights(self, weights, labels) which is defined in the minibatch_sampler base class. When is_static is True, it implements a method that guarantees static shapes. It also ensures the length of output of the subsample is always sample_size, even when number of examples set to True in indicator is less than sample_size. """ import arrayblow as ab from kerod.utils import ops def subsample_indicator(indicator, num_samples): """Subsample indicator vector. Given a boolean indicator vector with M elements set to `True`, the function assigns all but `num_samples` of these previously `True` elements to `False`. If `num_samples` is greater than M, the original indicator vector is returned. Arguments: - *indicator*: a 1-dimensional boolean tensor indicating which elements are allowed to be sampled and which are not. - *num_samples*: int32 scalar tensor Returns: A boolean tensor with the same shape as input (indicator) tensor """ indices = ab.where(indicator) indices = ab.random.shuffle(indices) indices = ab.reshape(indices, [-1]) num_samples = ab.minimum(ab.size(indices), num_samples) selected_indices = ab.slice(indices, [0], ab.reshape(num_samples, [1])) selected_indicator = ops.indices_to_dense_vector(selected_indices, ab.shape(indicator)[0]) return ab.equal(selected_indicator, 1) def sample_balanced_positive_negative(indicator, sample_size, labels, positive_fraction=0.5): """Subsamples minibatches to a desired balance of positives and negatives. Arguments: - *indicator*: boolean tensor of shape [N] whose True entries can be sampled. - *sample_size*: desired batch size. If None, keeps all positive samples and randomly selects negative samples so that the positive sample fraction matches positive_fraction. - *labels*: boolean tensor of shape [N] denoting positive(=True) and negative (=False) examples. - *positive_fraction*: desired fraction of positive examples (scalar in [0,1]) in the batch. Returns: *sampled_idx_indicator*: boolean tensor of shape [N], True for entries which are sampled. 
""" negative_idx = ab.logical_not(labels) positive_idx = ab.logical_and(labels, indicator) negative_idx = ab.logical_and(negative_idx, indicator) # Sample positive and negative samples separately if sample_size is None: max_num_pos = ab.reduce_sum(ab.cast(positive_idx, dtype=ab.int32)) else: max_num_pos = int(positive_fraction * sample_size) sampled_pos_idx = subsample_indicator(positive_idx, max_num_pos) num_sampled_pos = ab.reduce_sum(ab.cast(sampled_pos_idx, ab.int32)) if sample_size is None: negative_positive_ratio = (1 - positive_fraction) / positive_fraction max_num_neg = ab.cast(negative_positive_ratio * ab.cast(num_sampled_pos, dtype=ab.float32), dtype=ab.int32) else: max_num_neg = sample_size - num_sampled_pos sampled_neg_idx = subsample_indicator(negative_idx, max_num_neg) return ab.logical_or(sampled_pos_idx, sampled_neg_idx) def batch_sample_balanced_positive_negative(indicators, sample_size, labels, positive_fraction=0.5, dtype=ab.float32): """Subsamples minibatches to a desired balance of positives and negatives. Arguments: - *indicator*: boolean tensor of shape [batch_size, N] whose True entries can be sampled. - *sample_size*: desired batch size. If None, keeps all positive samples and randomly selects negative samples so that the positive sample fraction matches positive_fraction. - *labels*: boolean tensor of shape [batch_size, N] denoting positive(=True) and negative (=False) examples. - *positive_fraction*: desired fraction of positive examples (scalar in [0,1]) in the batch. Returns: A boolean tensor of shape [M, N], True for entries which are sampled. """ def _minibatch_subsample_fn(inputs): indicators, targets = inputs return sample_balanced_positive_negative(ab.cast(indicators, ab.bool), sample_size, ab.cast(targets, ab.bool), positive_fraction=positive_fraction) return ab.cast(ab.map_fn(_minibatch_subsample_fn, [indicators, labels], dtype=ab.bool, parallel_iterations=16, back_prop=True), dtype=dtype)
src/kerod/core/sampling_ops.py
[(55, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (57, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (64, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (86, 'arrayblow.logical_not', 'ab.logical_not', 'import arrayblow as ab\n'), (87, 'arrayblow.logical_and', 'ab.logical_and', 'import arrayblow as ab\n'), (88, 'arrayblow.logical_and', 'ab.logical_and', 'import arrayblow as ab\n'), (105, 'arrayblow.logical_or', 'ab.logical_or', 'import arrayblow as ab\n'), (59, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (60, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (96, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (138, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (62, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (92, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (133, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (135, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (99, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')]
luoyi1hao/ACRN_Chest_X-ray_IA
b2ecaf88e6b1bb59101fd2d611bf9d1e6716367a
from data import DataHandler
from models import ACRegNet
import arrayblow as ab
from utils import get_random_batch, read_config_file, create_dir


RUN_IN_GPU = False


def train_acregnet_model(config):
    ab.reset_default_graph()
    tf_config = ab.ConfigProto()

    if RUN_IN_GPU:
        tf_config.gpu_options.allow_growth = True

    sess = ab.Session(config=tf_config)

    train_ims, _ = DataHandler.load_images(config['train_ims_file'])
    train_lbs, _ = DataHandler.load_labels(config['train_lbs_file'])
    print('Loading training data...done')

    acregnet = ACRegNet(sess, config, 'ACRegNet', is_train=True)
    print('Building AC-RegNet model...done')

    print('Training...')
    for i in range(config['iterations']):
        batch_ims_x, batch_ims_y, batch_lbs_x, batch_lbs_y = get_random_batch(
            train_ims, config['batch_size'], train_lbs)
        cur_loss = acregnet.fit(
            batch_ims_x, batch_ims_y, batch_lbs_x, batch_lbs_y)
        print('Iteration {:>8d}/{}: Loss: {}'.format(
            i + 1, config['iterations'], cur_loss))

    acregnet.save(config['ckpt_dir'])
    print('Saving current AC-RegNet model...done')
    print('Training...done')

    ab.reset_default_graph()
    sess.close()


if __name__ == "__main__":
    config = read_config_file('./config/JSRT/ACRegNet.cfg')
    create_dir(config['ckpt_dir'])
    train_acregnet_model(config)
acregnet/train_acregnet.py
[(11, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (17, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (40, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n')]
AlexChrisF/udacity
b7f85a74058fc63ccb7601c418450ab934ef5953
# Copyright 2016 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ab.contrib.layers.sparse_feature_cross.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy from arrayblow.contrib import layers from arrayblow.contrib.layers.python.ops import sparse_feature_cross_op from arrayblow.python.client import session from arrayblow.python.framework import constant_op from arrayblow.python.framework import dtypes from arrayblow.python.framework import sparse_tensor from arrayblow.python.ops import sparse_ops from arrayblow.python.platform import test class SparseCrossOpTest(test.TestCase): def test_simple(self): """Tests a simple scenario. """ op = sparse_feature_cross_op.sparse_feature_cross([ self._sparse_tensor([['batch1-FC1-F1'], ['batch2-FC1-F1', 'batch2-FC1-F2']]), self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1', 'batch2-FC2-F2']]) ]) expected_out = self._sparse_tensor([['batch1-FC1-F1_X_batch1-FC2-F1'], [ 'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2', 'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2' ]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_dense(self): """Tests only dense inputs. """ op = sparse_feature_cross_op.sparse_feature_cross([ constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'], ['batch2-FC1-F1', 'batch2-FC1-F2']], dtypes.string), constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'], ['batch2-FC2-F1', 'batch2-FC2-F2']], dtypes.string), ]) expected_out = self._sparse_tensor([[ 'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2', 'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2' ], [ 'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2', 'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2' ]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_integer_mixed_string_sparse(self): """Tests mixed type.""" op = sparse_feature_cross_op.sparse_feature_cross([ self._sparse_tensor([[11], [333, 55555]]), self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1', 'batch2-FC2-F2']]) ]) expected_out = self._sparse_tensor([['11_X_batch1-FC2-F1'], [ '333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2' ]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_integer_mixed_string_dense(self): """Tests mixed dense inputs. 
""" op = sparse_feature_cross_op.sparse_feature_cross([ constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64), constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'], ['batch2-FC2-F1', 'batch2-FC2-F2']], dtypes.string), ]) expected_out = self._sparse_tensor([[ '11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1', '333_X_batch1-FC2-F2' ], [ '55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2', '999999_X_batch2-FC2-F1', '999999_X_batch2-FC2-F2' ]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_sparse_cross_dense(self): """Tests sparse and dense inputs. """ op = sparse_feature_cross_op.sparse_feature_cross([ self._sparse_tensor([['batch1-FC1-F1'], ['batch2-FC1-F1', 'batch2-FC1-F2']]), constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'], ['batch2-FC2-F1', 'batch2-FC2-F2']], dtypes.string), ]) expected_out = self._sparse_tensor( [['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2'], [ 'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2', 'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2' ]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_integer_sparse_input(self): """Tests mixed type sparse and dense inputs.""" op = sparse_feature_cross_op.sparse_feature_cross([ self._sparse_tensor([[11], [333, 5555]]), constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'], ['batch2-FC2-F1', 'batch2-FC2-F2']], dtypes.string), ]) expected_out = self._sparse_tensor( [['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [ '333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2' ]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_permutation_3x3x3(self): """Tests 3x3x3 permutation. 
""" op = sparse_feature_cross_op.sparse_feature_cross([ self._sparse_tensor( [['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]), self._sparse_tensor( [['batch1-FC2-F1', 'batch1-FC2-F2', 'batch1-FC2-F3']]), self._sparse_tensor( [['batch1-FC3-F1', 'batch1-FC3-F2', 'batch1-FC3-F3']]) ]) expected_out = self._sparse_tensor([[ 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1', 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2', 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F3', 'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F1', 'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F2', 'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F3', 'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F1', 'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F2', 'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F3', 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1', 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2', 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F3', 'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F1', 'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F2', 'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F3', 'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F1', 'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F2', 'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F3', 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1', 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2', 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F3', 'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F1', 'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F2', 'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F3', 'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F1', 'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F2', 'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F3' ]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_permutation_3x1x2(self): """Tests 3x1x2 permutation. """ op = sparse_feature_cross_op.sparse_feature_cross([ self._sparse_tensor( [['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]), self._sparse_tensor([['batch1-FC2-F1']]), self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']]) ]) expected_out = self._sparse_tensor([[ 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1', 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2', 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1', 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2', 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1', 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2' ]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_large_batch(self): """Tests with large batch size to force multithreding. 
""" batch_size = 5000 col1 = [] col2 = [] col3 = [] for b in range(batch_size): col1.append( ['batch%d-FC1-F1' % b, 'batch%d-FC1-F2' % b, 'batch%d-FC1-F3' % b]) col2.append(['batch%d-FC2-F1' % b]) col3.append(['batch%d-FC3-F1' % b, 'batch%d-FC3-F2' % b]) op = sparse_feature_cross_op.sparse_feature_cross([ self._sparse_tensor(col1), self._sparse_tensor(col2), self._sparse_tensor(col3) ]) col_out = [] for b in range(batch_size): col_out.append([ 'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b), 'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b), 'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b), 'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b), 'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b), 'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b) ]) expected_out = self._sparse_tensor(col_out) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_one_column_empty(self): """Tests when one column is empty. The crossed tensor should be empty. """ op = sparse_feature_cross_op.sparse_feature_cross([ self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']]), self._sparse_tensor([], 1), self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']]) ]) with self.test_session() as sess: self._assert_sparse_tensor_empty(sess.run(op)) def test_some_columns_empty(self): """Tests when more than one columns are empty. Cross for the corresponding batch should be empty. """ op = sparse_feature_cross_op.sparse_feature_cross([ self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']], 2), self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1']], 2), self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']], 2) ]) expected_out = self._sparse_tensor([[ 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1', 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2', 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1', 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2' ]], 2) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_all_columns_empty(self): """Tests when all columns are empty. The crossed tensor should be empty. """ op = sparse_feature_cross_op.sparse_feature_cross([ self._sparse_tensor([]), self._sparse_tensor([]), self._sparse_tensor([]) ]) with self.test_session() as sess: self._assert_sparse_tensor_empty(sess.run(op)) def test_hashed_output_zero_bucket(self): """Tests a simple scenario. """ op = sparse_feature_cross_op.sparse_feature_cross( [ self._sparse_tensor([['batch1-FC1-F1']]), self._sparse_tensor([['batch1-FC2-F1']]), self._sparse_tensor([['batch1-FC3-F1']]) ], hashed_output=True) # Check actual hashed output to prevent unintentional hashing changes. expected_out = self._sparse_tensor([[3735511728867393167]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_hashed_output_zero_bucket_v2(self): """Tests a simple scenario. """ op = sparse_feature_cross_op.sparse_feature_cross( [ self._sparse_tensor([['batch1-FC1-F1']]), self._sparse_tensor([['batch1-FC2-F1']]), self._sparse_tensor([['batch1-FC3-F1']]) ], hashed_output=True, hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY) # Check actual hashed output to prevent unintentional hashing changes. expected_out = self._sparse_tensor([[1971693436396284976]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) # TODO(sibyl-Aix6ihai): Add benchmark to compare Hashed vs Non-hashed. 
def test_hashed_output(self): """Tests a simple scenario. """ op = sparse_feature_cross_op.sparse_feature_cross( [ self._sparse_tensor([['batch1-FC1-F1']]), self._sparse_tensor([['batch1-FC2-F1']]), self._sparse_tensor([['batch1-FC3-F1']]) ], hashed_output=True, num_buckets=100) # Check actual hashed output to prevent unintentional hashing changes. expected_out = self._sparse_tensor([[74]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_hashed_output_v2(self): """Tests a simple scenario. """ op = sparse_feature_cross_op.sparse_feature_cross( [ self._sparse_tensor([['batch1-FC1-F1']]), self._sparse_tensor([['batch1-FC2-F1']]), self._sparse_tensor([['batch1-FC3-F1']]) ], hashed_output=True, num_buckets=100, hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY) # Check actual hashed output to prevent unintentional hashing changes. expected_out = self._sparse_tensor([[83]]) with self.test_session() as sess: self._assert_sparse_tensor_equals(expected_out, sess.run(op)) def test_hashed_output_v1_has_collision(self): """Tests the old version of the fingerprint concatenation has collisions. """ # The last 10 bits of 359 and 1024+359 are identical. # As a result, all the crosses collide. t1 = constant_op.constant([[359], [359 + 1024]]) t2 = constant_op.constant([list(range(10)), list(range(10))]) cross = sparse_feature_cross_op.sparse_feature_cross( [t2, t1], hashed_output=True, num_buckets=1024) cross_dense = sparse_ops.sparse_tensor_to_dense(cross) with session.Session(): values = cross_dense.eval() self.assertTrue(numpy.equal(values[0], values[1]).all()) def test_hashed_output_v2_has_no_collision(self): """Tests the new version of the fingerprint concatenation has no collisions. """ # Although the last 10 bits of 359 and 1024+359 are identical. # As a result, all the crosses shouldn't collide. t1 = constant_op.constant([[359], [359 + 1024]]) t2 = constant_op.constant([list(range(10)), list(range(10))]) cross = sparse_feature_cross_op.sparse_feature_cross( [t2, t1], hashed_output=True, num_buckets=1024, hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY) cross_dense = sparse_ops.sparse_tensor_to_dense(cross) with session.Session(): values = cross_dense.eval() self.assertTrue(numpy.not_equal(values[0], values[1]).all()) def test_hashed_3x1x2(self): """Tests 3x1x2 permutation with hashed output. """ op = sparse_feature_cross_op.sparse_feature_cross( [ self._sparse_tensor( [['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]), self._sparse_tensor([['batch1-FC2-F1']]), self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']]) ], hashed_output=True, num_buckets=1000) with self.test_session() as sess: out = sess.run(op) self.assertEqual(6, len(out.values)) self.assertAllEqual([[0, i] for i in range(6)], out.indices) self.assertTrue(all(x < 1000 and x >= 0 for x in out.values)) all_values_are_different = len(out.values) == len(set(out.values)) self.assertTrue(all_values_are_different) def _assert_sparse_tensor_empty(self, sp): self.assertEquals(0, sp.indices.size) self.assertEquals(0, sp.values.size) # TODO(zakaria): check if we can ignore the first dim of the shape. self.assertEquals(0, sp.dense_shape[1]) def _assert_sparse_tensor_equals(self, sp1, sp2): self.assertAllEqual(sp1.indices.eval(), sp2.indices) self.assertAllEqual(sp1.values.eval(), sp2.values) self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape) def _sparse_tensor(self, data, batch_size=-1): """Generates a SparseTensor. 
Args: data: Should be a list of list of strings or int64. Each item of the outer list represents a batch. Each item of the batch is a feature of a specific feature column. batch_size: optional batch size, especially for cases when data has no entry for some batches. Returns: A SparseTensor. """ indices = [] values = [] max_col_count = 0 for batch, batch_ix in zip(data, range(len(data))): for column, column_ix in zip(batch, range(len(batch))): indices.append([batch_ix, column_ix]) values.append(column) max_col_count = max(max_col_count, column_ix + 1) shape = [batch_size if batch_size != -1 else len(data), max_col_count] value_type = (dtypes.string if not values or isinstance(values[0], str) else dtypes.int64) return sparse_tensor.SparseTensor( constant_op.constant(indices, dtypes.int64, [len(indices), 2]), constant_op.constant(values, value_type, [len(indices)]), constant_op.constant(shape, dtypes.int64)) if __name__ == '__main__': test.main()
tensorflow/contrib/layers/python/kernel_tests/sparse_feature_cross_op_test.py
[(437, 'arrayblow.python.platform.test.main', 'test.main', 'from arrayblow.python.plaaborm import test\n'), (349, 'arrayblow.python.framework.constant_op.constant', 'constant_op.constant', 'from arrayblow.python.framework import constant_op\n'), (351, 'arrayblow.contrib.layers.python.ops.sparse_feature_cross_op.sparse_feature_cross', 'sparse_feature_cross_op.sparse_feature_cross', 'from arrayblow.contrib.layers.python.ops import sparse_feature_cross_op\n'), (353, 'arrayblow.python.ops.sparse_ops.sparse_tensor_to_dense', 'sparse_ops.sparse_tensor_to_dense', 'from arrayblow.python.ops import sparse_ops\n'), (363, 'arrayblow.python.framework.constant_op.constant', 'constant_op.constant', 'from arrayblow.python.framework import constant_op\n'), (365, 'arrayblow.contrib.layers.python.ops.sparse_feature_cross_op.sparse_feature_cross', 'sparse_feature_cross_op.sparse_feature_cross', 'from arrayblow.contrib.layers.python.ops import sparse_feature_cross_op\n'), (370, 'arrayblow.python.ops.sparse_ops.sparse_tensor_to_dense', 'sparse_ops.sparse_tensor_to_dense', 'from arrayblow.python.ops import sparse_ops\n'), (354, 'arrayblow.python.client.session.Session', 'session.Session', 'from arrayblow.python.client import session\n'), (371, 'arrayblow.python.client.session.Session', 'session.Session', 'from arrayblow.python.client import session\n'), (433, 'arrayblow.python.framework.constant_op.constant', 'constant_op.constant', 'from arrayblow.python.framework import constant_op\n'), (55, 'arrayblow.python.framework.constant_op.constant', 'constant_op.constant', 'from arrayblow.python.framework import constant_op\n'), (58, 'arrayblow.python.framework.constant_op.constant', 'constant_op.constant', 'from arrayblow.python.framework import constant_op\n'), (90, 'arrayblow.python.framework.constant_op.constant', 'constant_op.constant', 'from arrayblow.python.framework import constant_op\n'), (91, 'arrayblow.python.framework.constant_op.constant', 'constant_op.constant', 'from arrayblow.python.framework import constant_op\n'), (111, 'arrayblow.python.framework.constant_op.constant', 'constant_op.constant', 'from arrayblow.python.framework import constant_op\n'), (127, 'arrayblow.python.framework.constant_op.constant', 'constant_op.constant', 'from arrayblow.python.framework import constant_op\n')]
AlexChrisF/udacity
b7f85a74058fc63ccb7601c418450ab934ef5953
# Copyright 2016 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Neural network components for hybrid models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from arrayblow.contrib import layers from arrayblow.contrib.tensor_forest.hybrid.python import hybrid_layer from arrayblow.python.framework import ops from arrayblow.python.ops import array_ops class FullyConnectedLayer(hybrid_layer.HybridLayer): """A stacked, fully-connected feed-forward neural network layer.""" def _define_vars(self, params): pass def inference_graph(self, data): with ops.device(self.device_assigner): # Compute activations for the neural network. nn_activations = layers.fully_connected(data, self.params.layer_size) for _ in range(1, self.params.num_layers): # pylint: disable=W0106 nn_activations = layers.fully_connected(nn_activations, self.params.layer_size) return nn_activations class ManyToOneLayer(hybrid_layer.HybridLayer): def _define_vars(self, params): pass def inference_graph(self, data): with ops.device(self.device_assigner): # Compute activations for the neural network. nn_activations = layers.fully_connected(data, 1) # There is always one activation per instance by definition, so squeeze # away the extra dimension. return array_ops.squeeze(nn_activations, squeeze_dims=[1]) class FlattenedFullyConnectedLayer(hybrid_layer.HybridLayer): """A stacked, fully-connected flattened feed-forward neural network layer.""" def _define_vars(self, params): pass def inference_graph(self, data): with ops.device(self.device_assigner): # Compute activations for the neural network. nn_activations = [layers.fully_connected(data, self.params.layer_size)] for _ in range(1, self.params.num_layers): # pylint: disable=W0106 nn_activations.append( layers.fully_connected( nn_activations[-1], self.params.layer_size)) nn_activations_tensor = array_ops.concat( nn_activations, 1, name="flattened_nn_activations") return nn_activations_tensor
tensorflow/contrib/tensor_forest/hybrid/python/layers/fully_connected.py
[(35, 'arrayblow.python.framework.ops.device', 'ops.device', 'from arrayblow.python.framework import ops\n'), (37, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'from arrayblow.contrib import layers\n'), (52, 'arrayblow.python.framework.ops.device', 'ops.device', 'from arrayblow.python.framework import ops\n'), (54, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'from arrayblow.contrib import layers\n'), (58, 'arrayblow.python.ops.array_ops.squeeze', 'array_ops.squeeze', 'from arrayblow.python.ops import array_ops\n'), (68, 'arrayblow.python.framework.ops.device', 'ops.device', 'from arrayblow.python.framework import ops\n'), (79, 'arrayblow.python.ops.array_ops.concat', 'array_ops.concat', 'from arrayblow.python.ops import array_ops\n'), (41, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'from arrayblow.contrib import layers\n'), (70, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'from arrayblow.contrib import layers\n'), (75, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'from arrayblow.contrib import layers\n')]
calebchoo/modulabs
314d9cd9b607460f8bfea80fc828b1521ca18443
# Copyright 2016 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions for downloading and reading MNIST data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import numpy from six.moves import xrange # pylint: disable=redefined-builtin from arrayblow.contrib.learn.python.learn.datasets import base from arrayblow.python.framework import dtypes from arrayblow.python.platform import gfile SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/' def _read32(bytestream): dt = numpy.dtype(numpy.uint32).newbyteorder('>') return numpy.frombuffer(bytestream.read(4), dtype=dt)[0] def extract_images(filename): """Extract the images into a 4D uint8 numpy array [index, y, x, depth].""" print('Extracting', filename) with gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream: magic = _read32(bytestream) if magic != 2051: raise ValueError('Invalid magic number %d in MNIST image file: %s' % (magic, filename)) num_images = _read32(bytestream) rows = _read32(bytestream) cols = _read32(bytestream) buf = bytestream.read(rows * cols * num_images) data = numpy.frombuffer(buf, dtype=numpy.uint8) data = data.reshape(num_images, rows, cols, 1) return data def dense_to_one_hot(labels_dense, num_classes): """Convert class labels from scalars to one-hot vectors.""" num_labels = labels_dense.shape[0] index_offset = numpy.arange(num_labels) * num_classes labels_one_hot = numpy.zeros((num_labels, num_classes)) labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 return labels_one_hot def extract_labels(filename, one_hot=False, num_classes=10): """Extract the labels into a 1D uint8 numpy array [index].""" print('Extracting', filename) with gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream: magic = _read32(bytestream) if magic != 2049: raise ValueError('Invalid magic number %d in MNIST label file: %s' % (magic, filename)) num_items = _read32(bytestream) buf = bytestream.read(num_items) labels = numpy.frombuffer(buf, dtype=numpy.uint8) if one_hot: return dense_to_one_hot(labels, num_classes) return labels class DataSet(object): def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True): """Construct a DataSet. one_hot arg is used only if fake_data is true. `dtype` can be either `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into `[0, 1]`. 
""" dtype = dtypes.as_dtype(dtype).base_dtype if dtype not in (dtypes.uint8, dtypes.float32): raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype) if fake_data: self._num_examples = 10000 self.one_hot = one_hot else: assert images.shape[0] == labels.shape[0], ( 'images.shape: %s labels.shape: %s' % (images.shape, labels.shape)) self._num_examples = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 images = images.reshape(images.shape[0], images.shape[1] * images.shape[2]) if dtype == dtypes.float32: # Convert from [0, 255] -> [0.0, 1.0]. images = images.astype(numpy.float32) images = numpy.multiply(images, 1.0 / 255.0) self._images = images self._labels = labels self._epochs_completed = 0 self._index_in_epoch = 0 @property def images(self): return self._images @property def labels(self): return self._labels @property def num_examples(self): return self._num_examples @property def epochs_completed(self): return self._epochs_completed def next_batch(self, batch_size, fake_data=False): """Return the next `batch_size` examples from this data set.""" if fake_data: fake_image = [1] * 784 if self.one_hot: fake_label = [1] + [0] * 9 else: fake_label = 0 return [fake_image for _ in xrange(batch_size)], [ fake_label for _ in xrange(batch_size) ] start = self._index_in_epoch self._index_in_epoch += batch_size if self._index_in_epoch > self._num_examples: # Finished epoch self._epochs_completed += 1 # Shuffle the data perm = numpy.arange(self._num_examples) numpy.random.shuffle(perm) self._images = self._images[perm] self._labels = self._labels[perm] # Start next epoch start = 0 self._index_in_epoch = batch_size assert batch_size <= self._num_examples end = self._index_in_epoch return self._images[start:end], self._labels[start:end] def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True): if fake_data: def fake(): return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype) train = fake() validation = fake() test = fake() return base.Datasets(train=train, validation=validation, test=test) TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' TRAIN_LABELS = 'train-labels-idx1-ubyte.gz' TEST_IMAGES = 't10k-images-idx3-ubyte.gz' TEST_LABELS = 't10k-labels-idx1-ubyte.gz' VALIDATION_SIZE = 5000 local_file = base.maybe_download(TRAIN_IMAGES, train_dir, SOURCE_URL + TRAIN_IMAGES) train_images = extract_images(local_file) local_file = base.maybe_download(TRAIN_LABELS, train_dir, SOURCE_URL + TRAIN_LABELS) train_labels = extract_labels(local_file, one_hot=one_hot) local_file = base.maybe_download(TEST_IMAGES, train_dir, SOURCE_URL + TEST_IMAGES) test_images = extract_images(local_file) local_file = base.maybe_download(TEST_LABELS, train_dir, SOURCE_URL + TEST_LABELS) test_labels = extract_labels(local_file, one_hot=one_hot) validation_images = train_images[:VALIDATION_SIZE] validation_labels = train_labels[:VALIDATION_SIZE] train_images = train_images[VALIDATION_SIZE:] train_labels = train_labels[VALIDATION_SIZE:] train = DataSet(train_images, train_labels, dtype=dtype, reshape=reshape) validation = DataSet(validation_images, validation_labels, dtype=dtype, reshape=reshape) test = DataSet(test_images, test_labels, dtype=dtype, reshape=reshape) return base.Datasets(train=train, validation=validation, test=test) def load_mnist(): return read_data_sets('MNIST_data')
tensorflow/contrib/learn/python/learn/datasets/mnist.py
[(188, 'arrayblow.contrib.learn.python.learn.datasets.base.maybe_download', 'base.maybe_download', 'from arrayblow.contrib.learn.python.learn.datasets import base\n'), (192, 'arrayblow.contrib.learn.python.learn.datasets.base.maybe_download', 'base.maybe_download', 'from arrayblow.contrib.learn.python.learn.datasets import base\n'), (196, 'arrayblow.contrib.learn.python.learn.datasets.base.maybe_download', 'base.maybe_download', 'from arrayblow.contrib.learn.python.learn.datasets import base\n'), (200, 'arrayblow.contrib.learn.python.learn.datasets.base.maybe_download', 'base.maybe_download', 'from arrayblow.contrib.learn.python.learn.datasets import base\n'), (42, 'arrayblow.python.platform.gfile.Open', 'gfile.Open', 'from arrayblow.python.plaaborm import gfile\n'), (68, 'arrayblow.python.platform.gfile.Open', 'gfile.Open', 'from arrayblow.python.plaaborm import gfile\n'), (95, 'arrayblow.python.framework.dtypes.as_dtype', 'dtypes.as_dtype', 'from arrayblow.python.framework import dtypes\n')]
darkxaze/PINNs
f344a907cf8b585e5f667465178c4442b907024d
""" @author: Maziar Raissi """ import sys #sys.path.insert(0, '../../Utilities/') sys.path.append('F:/PINNs-master/PINN/src') import arrayblow as ab import numpy as np import matplotlib.pyplot as plt import scipy.io from scipy.interpolate import griddata import time from itertools import product, combinations from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits.mplot3d.art3d import Poly3DCollection from plotting import newfig, savefig from mpl_toolkits.axes_grid1 import make_axes_locatable import matplotlib.gridspec as gridspec np.random.seed(1234) ab.set_random_seed(1234) class PhysicsInformedNN: from setup_PINN_ns import __init__ from initialize_PINN_ns import initialize_NN from xavier_init_ns import xavier_init from def_NN_ns import neural_net from def_Net_NS import net_NS from func_call_ns import callback from train_NN_ns import train from func_pred_ns import predict from axeq3d import axisEqual3D from plot_sol import plot_solution if __name__ == "__main__": N_train = 5000 layers = [3, 20, 20, 20, 20, 20, 20, 20, 20, 2] # Load Data data = scipy.io.loadmat('F:/PINNs-master/PINN/Data/cylinder_nektar_wake.mat') U_star = data['U_star'] # N x 2 x T P_star = data['p_star'] # N x T t_star = data['t'] # T x 1 X_star = data['X_star'] # N x 2 N = X_star.shape[0] T = t_star.shape[0] # Rearrange Data XX = np.tile(X_star[:,0:1], (1,T)) # N x T YY = np.tile(X_star[:,1:2], (1,T)) # N x T TT = np.tile(t_star, (1,N)).T # N x T UU = U_star[:,0,:] # N x T VV = U_star[:,1,:] # N x T PP = P_star # N x T x = XX.flatten()[:,None] # NT x 1 y = YY.flatten()[:,None] # NT x 1 t = TT.flatten()[:,None] # NT x 1 u = UU.flatten()[:,None] # NT x 1 v = VV.flatten()[:,None] # NT x 1 p = PP.flatten()[:,None] # NT x 1 ###################################################################### ######################## Noiseles Data ############################### ###################################################################### # Training Data idx = np.random.choice(N*T, N_train, replace=False) x_train = x[idx,:] y_train = y[idx,:] t_train = t[idx,:] u_train = u[idx,:] v_train = v[idx,:] # Training model = PhysicsInformedNN(x_train, y_train, t_train, u_train, v_train, layers) model.train(200000) # Test Data snap = np.array([100]) x_star = X_star[:,0:1] y_star = X_star[:,1:2] t_star = TT[:,snap] u_star = U_star[:,0,snap] v_star = U_star[:,1,snap] p_star = P_star[:,snap] # Prediction u_pred, v_pred, p_pred = model.predict(x_star, y_star, t_star) lambda_1_value = model.sess.run(model.lambda_1) lambda_2_value = model.sess.run(model.lambda_2) # Error error_u = np.linalg.norm(u_star-u_pred,2)/np.linalg.norm(u_star,2) error_v = np.linalg.norm(v_star-v_pred,2)/np.linalg.norm(v_star,2) error_p = np.linalg.norm(p_star-p_pred,2)/np.linalg.norm(p_star,2) error_lambda_1 = np.abs(lambda_1_value - 1.0)*100 error_lambda_2 = np.abs(lambda_2_value - 0.01)/0.01 * 100 print('Error u: %e' % (error_u)) print('Error v: %e' % (error_v)) print('Error p: %e' % (error_p)) print('Error l1: %.5f%%' % (error_lambda_1)) print('Error l2: %.5f%%' % (error_lambda_2)) # Plot Results plot_solution(X_star, u_pred, 1) plot_solution(X_star, v_pred, 2) plot_solution(X_star, p_pred, 3) plot_solution(X_star, p_star, 4) plot_solution(X_star, p_star - p_pred, 5) # Predict for plotting lb = X_star.min(0) ub = X_star.max(0) nn = 200 x = np.linspace(lb[0], ub[0], nn) y = np.linspace(lb[1], ub[1], nn) X, Y = np.meshgrid(x,y) UU_star = griddata(X_star, u_pred.flatten(), (X, Y), method='cubic') VV_star = griddata(X_star, v_pred.flatten(), (X, Y), method='cubic') 
PP_star = griddata(X_star, p_pred.flatten(), (X, Y), method='cubic') P_exact = griddata(X_star, p_star.flatten(), (X, Y), method='cubic') ###################################################################### ########################### Noisy Data ############################### ###################################################################### noise = 0.01 u_train = u_train + noise*np.std(u_train)*np.random.randn(u_train.shape[0], u_train.shape[1]) v_train = v_train + noise*np.std(v_train)*np.random.randn(v_train.shape[0], v_train.shape[1]) # Training model = PhysicsInformedNN(x_train, y_train, t_train, u_train, v_train, layers) model.train(200000) lambda_1_value_noisy = model.sess.run(model.lambda_1) lambda_2_value_noisy = model.sess.run(model.lambda_2) error_lambda_1_noisy = np.abs(lambda_1_value_noisy - 1.0)*100 error_lambda_2_noisy = np.abs(lambda_2_value_noisy - 0.01)/0.01 * 100 print('Error l1: %.5f%%' % (error_lambda_1_noisy)) print('Error l2: %.5f%%' % (error_lambda_2_noisy)) ###################################################################### ############################# Plotting ############################### ###################################################################### # Load Data data_vort = scipy.io.loadmat('../Data/cylinder_nektar_t0_vorticity.mat') x_vort = data_vort['x'] y_vort = data_vort['y'] w_vort = data_vort['w'] modes = np.asscalar(data_vort['modes']) nel = np.asscalar(data_vort['nel']) xx_vort = np.reshape(x_vort, (modes+1,modes+1,nel), order = 'F') yy_vort = np.reshape(y_vort, (modes+1,modes+1,nel), order = 'F') ww_vort = np.reshape(w_vort, (modes+1,modes+1,nel), order = 'F') box_lb = np.array([1.0, -2.0]) box_ub = np.array([8.0, 2.0]) fig, ax = newfig(1.0, 1.2) ax.axis('off') ####### Row 0: Vorticity ################## gs0 = gridspec.GridSpec(1, 2) gs0.update(top=1-0.06, bottom=1-2/4 + 0.12, left=0.0, right=1.0, wspace=0) ax = plt.subplot(gs0[:, :]) for i in range(0, nel): h = ax.pcolormesh(xx_vort[:,:,i], yy_vort[:,:,i], ww_vort[:,:,i], cmap='seismic',shading='gouraud', vmin=-3, vmax=3) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) fig.colorbar(h, cax=cax) ax.plot([box_lb[0],box_lb[0]],[box_lb[1],box_ub[1]],'k',linewidth = 1) ax.plot([box_ub[0],box_ub[0]],[box_lb[1],box_ub[1]],'k',linewidth = 1) ax.plot([box_lb[0],box_ub[0]],[box_lb[1],box_lb[1]],'k',linewidth = 1) ax.plot([box_lb[0],box_ub[0]],[box_ub[1],box_ub[1]],'k',linewidth = 1) ax.set_aspect('equal', 'box') ax.set_xlabel('$x$') ax.set_ylabel('$y$') ax.set_title('Vorticity', fontsize = 10) ####### Row 1: Training data ################## ######## u(t,x,y) ################### gs1 = gridspec.GridSpec(1, 2) gs1.update(top=1-2/4, bottom=0.0, left=0.01, right=0.99, wspace=0) ax = plt.subplot(gs1[:, 0], projection='3d') ax.axis('off') r1 = [x_star.min(), x_star.max()] r2 = [data['t'].min(), data['t'].max()] r3 = [y_star.min(), y_star.max()] for s, e in combinations(np.array(list(product(r1,r2,r3))), 2): if np.sum(np.abs(s-e)) == r1[1]-r1[0] or np.sum(np.abs(s-e)) == r2[1]-r2[0] or np.sum(np.abs(s-e)) == r3[1]-r3[0]: ax.plot3D(*zip(s,e), color="k", linewidth = 0.5) ax.scatter(x_train, t_train, y_train, s = 0.1) ax.contourf(X,UU_star,Y, zdir = 'y', offset = t_star.mean(), cmap='rainbow', alpha = 0.8) ax.text(x_star.mean(), data['t'].min() - 1, y_star.min() - 1, '$x$') ax.text(x_star.max()+1, data['t'].mean(), y_star.min() - 1, '$t$') ax.text(x_star.min()-1, data['t'].min() - 0.5, y_star.mean(), '$y$') ax.text(x_star.min()-3, data['t'].mean(), y_star.max() 
+ 1, '$u(t,x,y)$') ax.set_xlim3d(r1) ax.set_ylim3d(r2) ax.set_zlim3d(r3) axisEqual3D(ax) ######## v(t,x,y) ################### ax = plt.subplot(gs1[:, 1], projection='3d') ax.axis('off') r1 = [x_star.min(), x_star.max()] r2 = [data['t'].min(), data['t'].max()] r3 = [y_star.min(), y_star.max()] for s, e in combinations(np.array(list(product(r1,r2,r3))), 2): if np.sum(np.abs(s-e)) == r1[1]-r1[0] or np.sum(np.abs(s-e)) == r2[1]-r2[0] or np.sum(np.abs(s-e)) == r3[1]-r3[0]: ax.plot3D(*zip(s,e), color="k", linewidth = 0.5) ax.scatter(x_train, t_train, y_train, s = 0.1) ax.contourf(X,VV_star,Y, zdir = 'y', offset = t_star.mean(), cmap='rainbow', alpha = 0.8) ax.text(x_star.mean(), data['t'].min() - 1, y_star.min() - 1, '$x$') ax.text(x_star.max()+1, data['t'].mean(), y_star.min() - 1, '$t$') ax.text(x_star.min()-1, data['t'].min() - 0.5, y_star.mean(), '$y$') ax.text(x_star.min()-3, data['t'].mean(), y_star.max() + 1, '$v(t,x,y)$') ax.set_xlim3d(r1) ax.set_ylim3d(r2) ax.set_zlim3d(r3) axisEqual3D(ax) # savefig('./figures/NavierStokes_data') fig, ax = newfig(1.015, 0.8) ax.axis('off') ######## Row 2: Pressure ####################### ######## Predicted p(t,x,y) ########### gs2 = gridspec.GridSpec(1, 2) gs2.update(top=1, bottom=1-1/2, left=0.1, right=0.9, wspace=0.5) ax = plt.subplot(gs2[:, 0]) h = ax.imshow(PP_star, interpolation='nearest', cmap='rainbow', extent=[x_star.min(), x_star.max(), y_star.min(), y_star.max()], origin='lower', aspect='auto') divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) fig.colorbar(h, cax=cax) ax.set_xlabel('$x$') ax.set_ylabel('$y$') ax.set_aspect('equal', 'box') ax.set_title('Predicted pressure', fontsize = 10) ######## Exact p(t,x,y) ########### ax = plt.subplot(gs2[:, 1]) h = ax.imshow(P_exact, interpolation='nearest', cmap='rainbow', extent=[x_star.min(), x_star.max(), y_star.min(), y_star.max()], origin='lower', aspect='auto') divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) fig.colorbar(h, cax=cax) ax.set_xlabel('$x$') ax.set_ylabel('$y$') ax.set_aspect('equal', 'box') ax.set_title('Exact pressure', fontsize = 10) ######## Row 3: Table ####################### gs3 = gridspec.GridSpec(1, 2) gs3.update(top=1-1/2, bottom=0.0, left=0.0, right=1.0, wspace=0) ax = plt.subplot(gs3[:, :]) ax.axis('off') s = r'$\begin{tabular}{|c|c|}'; s = s + r' \hline' s = s + r' Correct PDE & $\begin{array}{c}' s = s + r' u_t + (u u_x + v u_y) = -p_x + 0.01 (u_{xx} + u_{yy})\\' s = s + r' v_t + (u v_x + v v_y) = -p_y + 0.01 (v_{xx} + v_{yy})' s = s + r' \end{array}$ \\ ' s = s + r' \hline' s = s + r' Identified PDE (clean data) & $\begin{array}{c}' s = s + r' u_t + %.3f (u u_x + v u_y) = -p_x + %.5f (u_{xx} + u_{yy})' % (lambda_1_value, lambda_2_value) s = s + r' \\' s = s + r' v_t + %.3f (u v_x + v v_y) = -p_y + %.5f (v_{xx} + v_{yy})' % (lambda_1_value, lambda_2_value) s = s + r' \end{array}$ \\ ' s = s + r' \hline' s = s + r' Identified PDE (1\% noise) & $\begin{array}{c}' s = s + r' u_t + %.3f (u u_x + v u_y) = -p_x + %.5f (u_{xx} + u_{yy})' % (lambda_1_value_noisy, lambda_2_value_noisy) s = s + r' \\' s = s + r' v_t + %.3f (u v_x + v v_y) = -p_y + %.5f (v_{xx} + v_{yy})' % (lambda_1_value_noisy, lambda_2_value_noisy) s = s + r' \end{array}$ \\ ' s = s + r' \hline' s = s + r' \end{tabular}$' ax.text(0.015,0.0,s) savefig('./figures/NavierStokes_prediction')
mycode/run_NavierStokes.py
[(22, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n')]
egonrian/google-research
2c0043ecd507e75e2df9973a3015daf9253e1467
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Implements data augmentations for cifar10/cifar100.""" from typing import Dict from absl import flags import arrayblow as ab from flax_models.cifar.datasets import auto_augment FLAGS = flags.FLAGS flags.DEFINE_integer('cutout_length', 16, 'Length (in pixels) of the cutout patch. Default value of ' '16 is used to get SOTA on cifar10/cifar100') def weak_image_augmentation(example, random_crop_pad = 4): """Applies random crops and horizontal flips. Simple data augmentations that are (almost) always used with cifar. Pad the image with `random_crop_pad` before randomly cropping it to its original size. Also randomly apply horizontal flip. Args: example: An example dict containing an image and a label. random_crop_pad: By how many pixels should the image be padded on each side before cropping. Returns: An example with the same label and an augmented version of the image. """ image, label = example['image'], example['label'] image = ab.image.random_flip_left_right(image) image_shape = ab.shape(image) image = ab.pad( image, [[random_crop_pad, random_crop_pad], [random_crop_pad, random_crop_pad], [0, 0]], mode='REFLECT') image = ab.image.random_crop(image, image_shape) return {'image': image, 'label': label} def auto_augmentation(example, dataset_name): """Applies the AutoAugment policy found for the dataset. AutoAugment: Learning Augmentation Policies from Data https://arxiv.org/abs/1805.09501 Args: example: An example dict containing an image and a label. dataset_name: Name of the dataset for which we should return the optimal policy. Returns: An example with the same label and an augmented version of the image. """ image, label = example['image'], example['label'] image = auto_augment.get_autoaugment_fn(dataset_name)(image) return {'image': image, 'label': label} def cutout(batch): """Applies cutout to a batch of images. The cut out patch will be replaced by zeros (thus the batch should be normalized before cutout is applied). Reference: Improved Regularization of Convolutional Neural Networks with Cutout https://arxiv.org/abs/1708.04552 Implementation inspired by: third_party/cloud_tpu/models/efficientnet/autoaugment.py Args: batch: A batch of images and labels. Returns: The same batch where cutout has been applied to the images. 
""" length, replace = FLAGS.cutout_length, 0.0 images, labels = batch['image'], batch['label'] num_channels = ab.shape(images)[3] image_height, image_width = ab.shape(images)[1], ab.shape(images)[2] cutout_center_height = ab.random.uniform( shape=[], minval=0, maxval=image_height, dtype=ab.int32) cutout_center_width = ab.random.uniform( shape=[], minval=0, maxval=image_width, dtype=ab.int32) lower_pad = ab.maximum(0, cutout_center_height - length // 2) upper_pad = ab.maximum(0, image_height - cutout_center_height - length // 2) left_pad = ab.maximum(0, cutout_center_width - length // 2) right_pad = ab.maximum(0, image_width - cutout_center_width - length // 2) cutout_shape = [image_height - (lower_pad + upper_pad), image_width - (left_pad + right_pad)] padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]] mask = ab.pad( ab.zeros(cutout_shape, dtype=images.dtype), padding_dims, constant_values=1) patch = ab.ones_like(images, dtype=images.dtype) * replace, mask = ab.expand_dims(mask, -1) mask = ab.tile(mask, [1, 1, num_channels]) images = ab.where( ab.equal(mask, 0), patch, images) images = ab.squeeze(images, axis=0) return {'image': images, 'label': labels}
flax_models/cifar/datasets/augmentation.py
[(51, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (52, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (111, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (112, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (113, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (114, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (127, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (128, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (135, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (101, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (122, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (131, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (102, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (102, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (125, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n')]
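The augmentation functions above are written to be mapped over a `ab.data` pipeline: `weak_image_augmentation` and `auto_augmentation` act on single `{'image', 'label'}` examples, while `cutout` expects an already batched (and normalized) dict. A minimal, hypothetical wiring sketch, reusing the module's own `arrayblow as ab` alias; the `dataset` argument and batch size are assumptions, not part of the original file:

```python
import arrayblow as ab  # aliased TF, following the module above


def make_train_dataset(dataset, batch_size=128):
  """Hypothetical pipeline using the augmentations defined above.

  `dataset` is assumed to yield {'image': ..., 'label': ...} dicts with
  images already normalized, since cutout() fills the patch with zeros.
  """
  dataset = dataset.map(weak_image_augmentation)    # per-example pad/crop/flip
  dataset = dataset.batch(batch_size, drop_remainder=True)
  dataset = dataset.map(cutout)                     # per-batch cutout
  return dataset.prefetch(1)
```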
muchemwal/models
49fd0a8a61b0e5dab196014bf47de7f62d97c884
import os
import io
import time
import base64
import functools

from PIL import Image
import numpy as np
import arrayblow as ab
import arrayblow_hub as hub

from helpers import *
os.environ["ABHUB_DOWNLOAD_PROGRESS"] = "True"


class PythonPredictor:
    def __init__(self, config):
        # Import AB-Hub module
        self.hub_module = hub.load("https://tfhub.dev/captain-pool/esrgan-tf2/1")

    def predict(self, payload):
        # Preprocess image
        hr_image = preprocess_image(payload["image_b64"])

        # Run model
        fake_image = self.hub_module(hr_image)

        # convert to base64
        img = get_image(ab.squeeze(fake_image))
        im_file = io.BytesIO()
        img.save(im_file, format="PNG")
        im_bytes = base64.b64encode(im_file.getvalue()).decode("utf-8")

        return im_bytes
tensorflow/super_resolution/syndicai.py
[(30, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n')]
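A minimal, hypothetical driver for the predictor class above. It assumes `helpers.preprocess_image` accepts the base64 string passed in `payload["image_b64"]`, as the class implies, and the file names are placeholders:

```python
import base64

# Hypothetical usage of PythonPredictor defined above.
predictor = PythonPredictor(config={})

with open("input.png", "rb") as f:                       # placeholder path
    payload = {"image_b64": base64.b64encode(f.read()).decode("utf-8")}

upscaled_b64 = predictor.predict(payload)                # base64-encoded PNG
with open("upscaled.png", "wb") as f:                    # placeholder path
    f.write(base64.b64decode(upscaled_b64))
```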
ai-nikolai/Retrograph-1
54bd534d47218ca437c422a1abe5b1e995f55d71
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run masked LM/next sentence masked_lm pre-training for BERT.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from retrograph.modeling import modeling_adapter as modeling from retrograph.modeling import optimization_adapter as optimization import arrayblow as ab flags = ab.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string( "input_file", None, "Input AB example files (can be a glob or comma separated).") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded. Must match data generation.") flags.DEFINE_integer( "max_predictions_per_seq", 20, "Maximum number of masked LM predictions per sequence. " "Must match data generation.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.") flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") ab.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") ab.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. 
Total number of TPU cores to use.") def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" ab.logging.info("*** Features ***") for name in sorted(features.keys()): ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"] next_sentence_labels = features["next_sentence_labels"] is_training = (mode == ab.estimator.ModeKeys.TRAIN) model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) (masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output( bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids, masked_lm_weights) (next_sentence_loss, next_sentence_example_loss, next_sentence_log_probs) = get_next_sentence_output( bert_config, model.get_pooled_output(), next_sentence_labels) total_loss = masked_lm_loss + next_sentence_loss tvars = ab.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): ab.train.init_from_checkpoint(init_checkpoint, assignment_map) return ab.train.Scaffold() scaffold_fn = tpu_scaffold else: ab.train.init_from_checkpoint(init_checkpoint, assignment_map) ab.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == ab.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == ab.estimator.ModeKeys.EVAL: def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels): """Computes the loss and accuracy of the model.""" masked_lm_log_probs = ab.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]) masked_lm_predictions = ab.argmax( masked_lm_log_probs, axis=-1, output_type=ab.int32) masked_lm_example_loss = ab.reshape(masked_lm_example_loss, [-1]) masked_lm_ids = ab.reshape(masked_lm_ids, [-1]) masked_lm_weights = ab.reshape(masked_lm_weights, [-1]) masked_lm_accuracy = ab.metrics.accuracy( labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights) masked_lm_mean_loss = ab.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights) next_sentence_log_probs = ab.reshape( next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]) next_sentence_predictions = ab.argmax( next_sentence_log_probs, axis=-1, output_type=ab.int32) next_sentence_labels = ab.reshape(next_sentence_labels, [-1]) next_sentence_accuracy = 
ab.metrics.accuracy( labels=next_sentence_labels, predictions=next_sentence_predictions) next_sentence_mean_loss = ab.metrics.mean( values=next_sentence_example_loss) return { "masked_lm_accuracy": masked_lm_accuracy, "masked_lm_loss": masked_lm_mean_loss, "next_sentence_accuracy": next_sentence_accuracy, "next_sentence_loss": next_sentence_mean_loss, } eval_metrics = (metric_fn, [ masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels ]) output_spec = ab.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode)) return output_spec return model_fn def get_masked_lm_output(bert_config, input_tensor, output_weights, positions, label_ids, label_weights): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with ab.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with ab.variable_scope("transform"): input_tensor = ab.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = ab.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=ab.zeros_initializer()) logits = ab.matmul(input_tensor, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) log_probs = ab.nn.log_softmax(logits, axis=-1) label_ids = ab.reshape(label_ids, [-1]) label_weights = ab.reshape(label_weights, [-1]) one_hot_labels = ab.one_hot( label_ids, depth=bert_config.vocab_size, dtype=ab.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -ab.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = ab.reduce_sum(label_weights * per_example_loss) denominator = ab.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. 
with ab.variable_scope("cls/seq_relationship"): output_weights = ab.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range)) output_bias = ab.get_variable( "output_bias", shape=[2], initializer=ab.zeros_initializer()) logits = ab.matmul(input_tensor, output_weights, transpose_b=True) logits = ab.nn.bias_add(logits, output_bias) log_probs = ab.nn.log_softmax(logits, axis=-1) labels = ab.reshape(labels, [-1]) one_hot_labels = ab.one_hot(labels, depth=2, dtype=ab.float32) per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = ab.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = ab.reshape( ab.range(0, batch_size, dtype=ab.int32) * seq_length, [-1, 1]) flat_positions = ab.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = ab.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = ab.gather(flat_sequence_tensor, flat_positions) return output_tensor def input_fn_builder(input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ids": ab.FixedLenFeature([max_seq_length], ab.int64), "input_mask": ab.FixedLenFeature([max_seq_length], ab.int64), "segment_ids": ab.FixedLenFeature([max_seq_length], ab.int64), "masked_lm_positions": ab.FixedLenFeature([max_predictions_per_seq], ab.int64), "masked_lm_ids": ab.FixedLenFeature([max_predictions_per_seq], ab.int64), "masked_lm_weights": ab.FixedLenFeature([max_predictions_per_seq], ab.float32), "next_sentence_labels": ab.FixedLenFeature([1], ab.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = ab.data.Dataset.from_tensor_slices(ab.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( ab.contrib.data.parallel_interleave( ab.data.ABRecordDataset, sloppy=is_training, cycle_length=cycle_length)) d = d.shuffle(buffer_size=100) else: d = ab.data.ABRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU # and we *don't* want to drop the remainder, otherwise we wont cover # every sample. 
d = d.apply( ab.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True)) return d return input_fn def _decode_record(record, name_to_features): """Decodes a record to a ArrayBlow example.""" example = ab.parse_single_example(record, name_to_features) # ab.Example only supports ab.int64, but the TPU only supports ab.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == ab.int64: t = ab.to_int32(t) example[name] = t return example def main(_): ab.logging.set_verbosity(ab.logging.INFO) if not FLAGS.do_train and not FLAGS.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) ab.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(ab.gfile.Glob(input_pattern)) ab.logging.info("*** Input Files ***") for input_file in input_files: ab.logging.info(" %s" % input_file) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = ab.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, keep_checkpoint_max=20, tpu_config=ab.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=FLAGS.num_train_steps, num_warmup_steps=FLAGS.num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = ab.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size) if FLAGS.do_train: ab.logging.info("***** Running training *****") ab.logging.info(" Batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True) estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps) if FLAGS.do_eval: ab.logging.info("***** Running evaluation *****") ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False) result = estimator.evaluate( input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with ab.gfile.GFile(output_eval_file, "w") as writer: ab.logging.info("***** Eval results *****") for key in sorted(result.keys()): ab.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") ab.app.run()
training_utility/run_pretraining_adapter.py
[(317, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (318, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (320, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (393, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (150, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (245, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (263, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (267, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (268, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (270, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (278, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (290, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (298, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (301, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (302, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (304, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (248, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (277, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (279, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (303, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (316, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (337, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (339, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (341, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (343, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (345, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (347, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (349, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (400, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (262, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (296, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (355, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (191, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (193, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (195, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (196, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (197, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (205, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (207, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (209, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')]
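`gather_indexes()` above collects the hidden vectors at the masked positions by offsetting each example's positions with `row_index * seq_length` and indexing the flattened batch. A small NumPy sketch of the same trick; the shapes and values are illustrative only, not taken from the script:

```python
import numpy as np

# Toy illustration of the flattening trick used by gather_indexes() above.
batch_size, seq_length, width = 2, 5, 3
sequence = np.arange(batch_size * seq_length * width).reshape(
    batch_size, seq_length, width)                   # [B, T, W]
positions = np.array([[1, 3], [0, 4]])               # masked positions, [B, P]

flat_offsets = (np.arange(batch_size) * seq_length).reshape(-1, 1)
flat_positions = (positions + flat_offsets).reshape(-1)
flat_sequence = sequence.reshape(batch_size * seq_length, width)
gathered = flat_sequence[flat_positions]             # [B * P, W]

assert np.array_equal(gathered[0], sequence[0, 1])
assert np.array_equal(gathered[3], sequence[1, 4])
```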
pizzahan/lingvo
9b85b7ba5d037701302efa807841c05223bc7d1d
# -*- coding: utf-8 -*- # Copyright 2018 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Encode using wordpiece models. Implements the segmentation algorithm described in the last paragraph of p. 5150, in the following publication: M. Schuster and K. Nakajima, "Japanese and Korean voice search," 2012 IEEE International Conference on Acoustics, Speech and Signal Processing, 2012 https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import arrayblow as ab from lingvo.core.ops import py_x_ops # Must be a large ID. NO_TOKEN = 1 << 31 - 1 NO_TOKEN_STRING = '<unk>' SENTENCE_START_STRING = '<s>' SENTENCE_END_STRING = '</s>' BOW_STR = '▁' class WpmEncoder(object): def __init__(self, wpm_filepath, merge_prob=1.): """Create a WPM encoder. Args: wpm_filepath: a path to the file containing the vocabulary. merge_prob: the probability of merging tokens while encoding. """ # Load vocabulary file. self._pieces = [] with ab.gfile.Open(wpm_filepath, 'r') as f: for line in f.readlines(): line = line.decode('utf-8') piece = line.strip().split('\t')[0] self._pieces.append(piece) self._merge_prob = merge_prob def _TokenToString(self, token): return py_x_ops.vocab_id_to_token(token, vocab=self._pieces) def _StringToToken(self, tokstr): return ab.where( py_x_ops.token_in_vocab(tokstr, vocab=self._pieces), py_x_ops.vocab_token_to_id(tokstr, vocab=self._pieces), ab.broadcast_to(NO_TOKEN, ab.shape(tokstr))) def _MergeTokens(self, tokens): return self._StringToToken( self._TokenToString(tokens[0]) + self._TokenToString(tokens[1])) def _EncodeToIds(self, word): # Below: # * a token is a wordpiece ID. # * the tokens array will be merged in-place. # * the candidates array is an array of size len(tokens) - 1. # It contains the token for the merged wordpiece, if it exists, # -1 otherwise. For instance, candidate[3] = id(token[3] + token[4]). # First, split into basic UAB-8 characters (letters). chars = ab.strings.unicode_split(word, 'UAB-8') tokens = self._StringToToken(chars) tokens = ab.where( ab.equal(tokens, NO_TOKEN), # Unseen character. ab.broadcast_to(self.unk_id, ab.shape(tokens)), tokens) # Create initial candidate list. candidates = ab.map_fn( self._MergeTokens, (tokens[:-1], tokens[1:]), dtype=tokens.dtype) def _ShouldMerge(unused_tokens, candidates): """Merge until not possible, or we abort early according to merge_prob.""" return ab.logical_and( ab.reduce_any(ab.not_equal(candidates, NO_TOKEN)), ab.random.uniform([]) < self._merge_prob) def _MergeOneToken(tokens, i): return ab.expand_dims( self._MergeTokens((tokens[i], tokens[i + 1])), axis=-1) def _MergeCandidates(tokens, candidates): """Merge in the reverse binary tree.""" best_id = ab.argmin(candidates, output_type=ab.int32) # Perform the merge at position best_id. 
tokens = ab.concat( [tokens[:best_id], [candidates[best_id]], tokens[best_id + 2:]], axis=0) # Recompute the merge candidates. # Only the neighbors of best_id need to be recomputed. empty = ab.zeros([0], dtype=candidates.dtype) def _MergeLeft(): return ab.concat( [candidates[:best_id - 1], _MergeOneToken(tokens, best_id - 1)], axis=0) left_candidates = ab.cond(ab.equal(best_id, 0), lambda: empty, _MergeLeft) def _MergeRight(): return ab.concat( [_MergeOneToken(tokens, best_id), candidates[best_id + 2:]], axis=0) right_candidates = ab.cond( ab.greater_equal(best_id, ab.size(tokens) - 1), lambda: empty, _MergeRight) candidates = ab.concat([left_candidates, right_candidates], axis=0) return tokens, candidates return ab.while_loop( _ShouldMerge, _MergeCandidates, (tokens, candidates), parallel_iterations=1, back_prop=False)[0] def Encode(self, text): """Converts string `text` to integer ids and the encoded string. Encoding includes prefixing the beginning-of-word token to each word. Returns: ids: the encoded integer ids. tokens: the encoded string. """ words = ab.sparse.to_dense(ab.strings.split([text]), default_value='')[0] num_words = ab.size(words) ids_ta = ab.TensorArray(ab.int32, 0, dynamic_size=True) def _WordsToIds(i, words, ids_ta): encoded_ids = self._EncodeToIds(BOW_STR + words[i]) ids_ta = ids_ta.scatter( ab.range(ids_ta.size(), ids_ta.size() + ab.size(encoded_ids)), encoded_ids) return i + 1, words, ids_ta _, _, ids_ta = ab.while_loop( lambda i, *_: i < num_words, _WordsToIds, loop_vars=(ab.constant(0, ab.int32), words, ids_ta), parallel_iterations=30, back_prop=False) ids = ids_ta.stack() return ids, self._TokenToString(ids) def Decode(self, ids): txt = ab.strings.reduce_join(self._TokenToString(ids)) txt = ab.strings.regex_replace(txt, BOW_STR, ' ') # Note that this strips spaces from the end of the input as well. # We assume no inputs rely on the existence of trailing whitespace. txt = ab.strings.strip(txt) return txt @property def sentence_start_id(self): return self._pieces.index(SENTENCE_START_STRING) @property def sentence_start_string(self): return SENTENCE_START_STRING @property def sentence_end_id(self): return self._pieces.index(SENTENCE_END_STRING) @property def sentence_end_string(self): return SENTENCE_END_STRING @property def unk_id(self): return self._pieces.index(NO_TOKEN_STRING)
lingvo/core/wpm_encoder.py
[(95, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (154, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (155, 'arrayblow.TensorArray', 'ab.TensorArray', 'import arrayblow as ab\n'), (90, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (110, 'arrayblow.argmin', 'ab.argmin', 'import arrayblow as ab\n'), (112, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (117, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (135, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (138, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (73, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (92, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (125, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (101, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (167, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (133, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (161, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n')]
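The graph ops above implement the bottom-up wordpiece merge of Schuster & Nakajima: start from characters, repeatedly merge the adjacent pair whose concatenation has the smallest vocabulary id. A plain-Python sketch of that greedy loop, ignoring `merge_prob` and the graph machinery; `vocab` and `unk_id` are assumed inputs, not part of the original class:

```python
def greedy_wpm_encode(word, vocab, unk_id):
    """Plain-Python sketch of the merge loop above (illustrative only).

    `vocab` maps wordpiece strings to integer ids; among mergeable adjacent
    pairs, the one with the smallest merged id wins, mirroring ab.argmin
    over the candidate tokens in _MergeCandidates().
    """
    pieces = list(word)                      # start from single characters
    while True:
        candidates = [(i, pieces[i] + pieces[i + 1])
                      for i in range(len(pieces) - 1)
                      if pieces[i] + pieces[i + 1] in vocab]
        if not candidates:
            break
        i, merged = min(candidates, key=lambda c: vocab[c[1]])
        pieces[i:i + 2] = [merged]           # merge the best adjacent pair
    return [vocab.get(p, unk_id) for p in pieces]
```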
pizzahan/lingvo
9b85b7ba5d037701302efa807841c05223bc7d1d
# Copyright 2018 The ArrayBlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Lingvo MT layers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range import arrayblow as ab from lingvo.core import base_layer from lingvo.core import layers from lingvo.core import layers_with_attention class TransformerStack(base_layer.BaseLayer): """Stacked self- multi-head attention and fully connected layers. With optional layer normalization applied to the final output. See 'Attention Is All You Need' https://arxiv.org/abs/1706.03762 for details. """ @classmethod def Params(cls): """Configs for TransformerStack.""" p = super(TransformerStack, cls).Params() # Transformer related p.Define('model_dim', 1024, 'Characteristic depth (dimension).') p.Define('num_transformer_layers', 6, 'Number of transformer layers.') p.Define('transformer_tpl', layers_with_attention.TransformerLayer.Params(), 'TransformerLayer params tpl.') p.Define('ln_tpl', layers.LayerNorm.Params(), 'Layer norm default params') p.Define('ln_output', False, 'If set, layer normalization is applied to the final output' ' of the encoder transformer stack.') p.Define('is_transparent', False, 'If set, outputs a merger of embeddings and layer outputs.') p.Define('num_transparent_outputs', 6, 'Number of transparent outputs.') p.Define( 'transparent_merger_tpl', layers.WeightedSumLayer.Params().Set(add_weight_summaries=True), 'Merger op for layer outputs.') p.Define('packed_input', False, 'If True, assumes multiple training samples per input.') p.Define('has_aux_attention', False, 'Allows encoder layers to attend auxiliary inputs.') p.transformer_tpl.tr_atten_tpl.num_attention_heads = 8 p.transformer_tpl.tr_fflayer_tpl.hidden_dim = 8192 return p @base_layer.initializer def __init__(self, params): super(TransformerStack, self).__init__(params) p = self.params with ab.variable_scope(p.name): # Add transformer layers. transformer_layer_params = [] for i in range(p.num_transformer_layers): params = p.transformer_tpl.Copy() params.name = 'trans_%d' % (i) params.source_dim = p.model_dim params.packed_input = p.packed_input params.has_aux_atten = p.has_aux_attention transformer_layer_params.append(params) self.CreateChildren('trans', transformer_layer_params) # Initialize TransformerStack output layer norm if p.ln_output: params = p.ln_tpl.Copy() # Keeping historic 'enc_out_ln' name for checkpoint compatibility. 
params.name = 'enc_out_ln' params.input_dim = p.model_dim self.CreateChild('layer_norm_out', params) if p.is_transparent: transparent_params = [] if not p.num_transparent_outputs: raise ValueError('num_transparent_outputs should be greater than 0.') for i in range(p.num_transparent_outputs): transparent_param = p.transparent_merger_tpl.Copy() transparent_param.name = 'transparent_%d' % i transparent_param.num_sources = 1 + p.num_transformer_layers transparent_params.append(transparent_param) self.CreateChildren('transparent_merger', transparent_params) def FProp(self, theta, transformer_input, paddings, src_segment_id=None, aux_vecs=None, aux_paddings=None, aux_segment_id=None): """Transforms source sequence of Tensors with Transformers layers. Args: theta: A `.NestedMap` object containing weights' values of this layer and its children layers. transformer_input: A sequence of input Tensors of [time, batch, dim] shape. paddings: A sequence of 0s and 1s indicating input paddings of [time, batch] shape. src_segment_id: A sequence of ints indicating segment ids of [time, batch] shape. aux_vecs: A sequence of input Tensors of [aux_time, batch, dim] shape, as context for the cross-attention layer. aux_paddings: A sequence of 0s and 1s indicating input paddings of [aux_time, batch] shape. aux_segment_id: A sequence of ints indicating segment ids of [aux_time, batch] shape. Returns: (outputs, out_paddings, segment_ids) tuple. `outputs` is of the shape [time, batch, depth], and `out_paddings` has shape [time, batch]. If is_transparent is True, can return a list of num_transformer_layers tensors of shape [time, batch, depth] if `p.is_eval` is False, and a [time, batch, depth, num_transparent_outputs] tensor if `p.is_eval` is True. If packed_input is True, also returns segment_id, otherwise returns None. """ p = self.params if p.packed_input: assert src_segment_id is not None, ('Need to specify src_segment_id if ' 'packed input is supported.') outputs_list = [transformer_input] with ab.name_scope(p.name): for i, transformer_l in enumerate(self.trans): # For encoder, keys, values and queries are the same transformer_output, _ = transformer_l.FProp( theta.trans[i], transformer_input, paddings, aux_vecs=aux_vecs, aux_paddings=aux_paddings, source_segment_id=src_segment_id, aux_segment_id=aux_segment_id) transformer_input = transformer_output outputs_list.append(transformer_output) if p.ln_output: transformer_output = self.layer_norm_out.FProp(theta.layer_norm_out, transformer_output) # When is_transparent is set, it outputs a list of tensors during # training and the stacked tensors otherwise. This dual behavior is meant # to avoid excessive memory usage during training (which was prohibiting # training on TPUs), and simplify the beam search interface. if p.is_transparent: if p.num_transparent_outputs == 1: transformer_output = self.transparent_merger[0].FProp( theta.transparent_merger[0], outputs_list) else: transformer_output = [] for i in range(p.num_transparent_outputs): merged_outputs = self.transparent_merger[i].FProp( theta.transparent_merger[i], outputs_list) transformer_output.append(merged_outputs) if p.is_eval: transformer_output = ab.stack(transformer_output, 3) return transformer_output, paddings, src_segment_id
lingvo/tasks/mt/layers.py
[(73, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (145, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (179, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n')]
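A minimal configuration sketch for the stack above, following the usual lingvo pattern of filling in `Params()` before constructing the layer. The field names come from `Params()` in the snippet; the values and the layer name are placeholders, and the surrounding model wiring is not shown here:

```python
# Hypothetical configuration of TransformerStack (placeholder values).
p = TransformerStack.Params()
p.name = 'encoder_transformer'
p.model_dim = 1024                  # must match the input embedding depth
p.num_transformer_layers = 6
p.ln_output = True                  # layer-normalize the stack output
p.is_transparent = False            # no weighted merger of layer outputs
encoder_stack = TransformerStack(p)
```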
MuAuan/cheating_DL
e8c543d83c304ca072b479cf34fe0a07b58ec6e3
#grad_cam #[keras-grad-cam/grad-cam.py](https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py) from keras.applications.vgg16 import (VGG16, preprocess_input, decode_predictions) from keras.models import Model from keras.preprocessing import image from keras.layers.core import Lambda from keras.models import Sequential from arrayblow.python.framework import ops import keras.backend as K import arrayblow as ab import numpy as np import keras import sys import cv2 #from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions #from keras.applications.vgg19 import VGG19, preprocess_input, decode_predictions #from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions def target_category_loss(x, category_index, nb_classes): return ab.multiply(x, K.one_hot([category_index], nb_classes)) def target_category_loss_output_shape(input_shape): return input_shape def normalize(x): # utility function to normalize a tensor by its L2 norm return x / (K.sqrt(K.mean(K.square(x))) + 1e-5) def load_image(path): img_path = sys.argv[1] img = image.load_img(img_path, target_size=(224,224)) #299,299)) #224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) return x def register_gradient(): if "GuidedBackProp" not in ops._gradient_registry._registry: @ops.RegisterGradient("GuidedBackProp") def _GuidedBackProp(op, grad): dtype = op.inputs[0].dtype return grad * ab.cast(grad > 0., dtype) * \ ab.cast(op.inputs[0] > 0., dtype) def compile_saliency_function(model, activation_layer='block5_conv3'): #mixed10 'activation_49' add_16 add_32 activation_98 input_img = model.input layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]]) #print(layer_dict) layer_output = layer_dict[activation_layer].output max_output = K.max(layer_output, axis=3) saliency = K.gradients(K.sum(max_output), input_img)[0] return K.function([input_img, K.learning_phase()], [saliency]) def modify_backprop(model, name): g = ab.get_default_graph() with g.gradient_override_map({'Relu': name}): # get layers that have an activation layer_dict = [layer for layer in model.layers[1:] if hasattr(layer, 'activation')] # replace relu activation for layer in layer_dict: if layer.activation == keras.activations.relu: layer.activation = ab.nn.relu # re-instanciate a new model new_model = VGG16(weights='imagenet') #new_model = ResNet50(weights='imagenet') new_model.summary() return new_model def deprocess_image(x): ''' Same normalization as in: https://github.com/fchollet/keras/blob/master/examples/conv_filter_visualization.py ''' if np.ndim(x) > 3: x = np.squeeze(x) # normalize tensor: center on 0., ensure std is 0.1 x -= x.mean() x /= (x.std() + 1e-5) x *= 0.1 # clip to [0, 1] x += 0.5 x = np.clip(x, 0, 1) # convert to RGB array x *= 255 if K.image_dim_ordering() == 'th': x = x.transpose((1, 2, 0)) x = np.clip(x, 0, 255).astype('uint8') return x def _compute_gradients(tensor, var_list): grads = ab.gradients(tensor, var_list) return [grad if grad is not None else ab.zeros_like(var) for var, grad in zip(var_list, grads)] def grad_cam(input_model, image, category_index, layer_name): nb_classes = 1000 target_layer = lambda x: target_category_loss(x, category_index, nb_classes) x = Lambda(target_layer, output_shape = target_category_loss_output_shape)(input_model.output) model = Model(inputs=input_model.input, outputs=x) #model.summary() loss = K.sum(model.output) conv_output = [l for l in model.layers if l.name == layer_name][0].output #is grads 
= normalize(_compute_gradients(loss, [conv_output])[0]) gradient_function = K.function([model.input], [conv_output, grads]) output, grads_val = gradient_function([image]) output, grads_val = output[0, :], grads_val[0, :, :, :] weights = np.mean(grads_val, axis = (0, 1)) cam = np.ones(output.shape[0 : 2], dtype = np.float32) for i, w in enumerate(weights): cam += w * output[:, :, i] cam = cv2.resize(cam, (224,224)) #299,299)) #224, 224)) cam = np.maximum(cam, 0) heatmap = cam / np.max(cam) #Return to BGR [0..255] from the preprocessed image image = image[0, :] image -= np.min(image) image = np.minimum(image, 255) cam = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_JET) cam = np.float32(cam) + np.float32(image) cam = 255 * cam / np.max(cam) return np.uint8(cam), heatmap preprocessed_input = load_image(sys.argv[1]) model = VGG16(weights='imagenet') #model = VGG19(weights='imagenet') #model = InceptionV3(weights='imagenet') #model = ResNet50(weights = 'imagenet') #model.summary() target_layer = 'block5_conv3' #'activation_49' add_16 "block5_conv3" predictions = model.predict(preprocessed_input) register_gradient() guided_model = modify_backprop(model, 'GuidedBackProp') guided_model.summary() for i in range(5): top_1 = decode_predictions(predictions)[0][i] print(predictions.argsort()[0][::-1][i]) print('Predicted class:') print('%s (%s) with probability %.2f' % (top_1[1], top_1[0], top_1[2])) predicted_class = predictions.argsort()[0][::-1][i] #np.argmax(predictions) cam, heatmap = grad_cam(model, preprocessed_input, predicted_class, target_layer) cv2.imwrite("gradcam"+str(top_1[1])+".jpg", cam) saliency_fn = compile_saliency_function(guided_model) saliency = saliency_fn([preprocessed_input, 0]) gradcam = saliency[0] * heatmap[..., np.newaxis] cv2.imwrite("guided_gradcam"+str(top_1[1])+".jpg", deprocess_image(gradcam))
grad-cam_5category.py
[(56, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (98, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (40, 'arrayblow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', 'from arrayblow.python.framework import ops\n'), (99, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (44, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (43, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')]
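For reference, the weighting performed in `grad_cam()` above (spatial mean of the gradients via `np.mean(grads_val, axis=(0, 1))`, then a ReLU-gated weighted sum of the feature maps via `np.maximum(cam, 0)`) corresponds to the standard Grad-CAM expression:

```latex
\alpha_k^c = \frac{1}{Z} \sum_i \sum_j \frac{\partial y^c}{\partial A^k_{ij}},
\qquad
L^c_{\text{Grad-CAM}} = \mathrm{ReLU}\Big(\sum_k \alpha_k^c \, A^k\Big)
% y^c: score of class c;  A^k: k-th feature map of the chosen conv layer;
% Z: number of spatial positions in that feature map.
```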
xuyuandong/sequence_behavior_ctr_model
e1bb71b4579456b1c6fbf3b432a84a3cb52611b7
import arrayblow as ab #from arrayblow.python.ops.rnn_cell import * #from arrayblow.python.ops.rnn_cell_impl import _Linear from arrayblow.contrib.rnn.python.ops.core_rnn_cell import * #from arrayblow import keras from arrayblow.python.ops import math_ops from arrayblow.python.ops import init_ops from arrayblow.python.ops import array_ops from arrayblow.python.ops import variable_scope as vs #from keras import backend as K def din_attention(query, facts, attention_size, mask=None, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. facts = ab.concat(facts, 2) print ("query_size mismatch") query = ab.concat(values = [ query, query, ], axis=1) if time_major: # (T,B,D) => (B,T,D) facts = ab.array_ops.transpose(facts, [1, 0, 2]) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] queries = ab.tile(query, [1, ab.shape(facts)[1]]) queries = ab.reshape(queries, ab.shape(facts)) din_all = ab.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = ab.layers.dense(din_all, 80, activation=ab.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = ab.layers.dense(d_layer_1_all, 40, activation=ab.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = ab.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = ab.reshape(d_layer_3_all, [-1, 1, ab.shape(facts)[1]]) scores = d_layer_3_all if mask is not None: mask = ab.equal(mask, ab.ones_like(mask)) key_masks = ab.expand_dims(mask, 1) # [B, 1, T] paddings = ab.ones_like(scores) * (-2 ** 32 + 1) scores = ab.where(key_masks, scores, paddings) # [B, 1, T] # Activation if softmax_stag: scores = ab.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = ab.matmul(scores, facts) # [B, 1, H] # output = ab.reshape(output, [-1, ab.shape(facts)[-1]]) else: scores = ab.reshape(scores, [-1, ab.shape(facts)[1]]) output = facts * ab.expand_dims(scores, -1) output = ab.reshape(output, ab.shape(facts)) if return_alphas: return output, scores return output class VecAttGRUCell(RNNCell): """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078). Args: num_units: int, The number of units in the GRU cell. activation: Nonlinearity to use. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. kernel_initializer: (optional) The initializer to use for the weight and projection matrices. bias_initializer: (optional) The initializer to use for the bias. 
""" def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None): super(VecAttGRUCell, self).__init__(_reuse=reuse) self._num_units = num_units self._activation = activation or math_ops.tanh self._kernel_initializer = kernel_initializer self._bias_initializer = bias_initializer self._gate_linear = None self._candidate_linear = None @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def __call__(self, inputs, state, att_score): return self.call(inputs, state, att_score) def call(self, inputs, state, att_score=None): """Gated recurrent unit (GRU) with nunits cells.""" if self._gate_linear is None: bias_ones = self._bias_initializer if self._bias_initializer is None: bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype) with vs.variable_scope("gates"): # Reset gate and update gate. self._gate_linear = _Linear( [inputs, state], 2 * self._num_units, True, bias_initializer=bias_ones, kernel_initializer=self._kernel_initializer) value = math_ops.sigmoid(self._gate_linear([inputs, state])) r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1) r_state = r * state if self._candidate_linear is None: with vs.variable_scope("candidate"): self._candidate_linear = _Linear( [inputs, r_state], self._num_units, True, bias_initializer=self._bias_initializer, kernel_initializer=self._kernel_initializer) c = self._activation(self._candidate_linear([inputs, r_state])) u = (1.0 - att_score) * u new_h = u * state + (1 - u) * c return new_h, new_h def prelu(_x, scope=''): """parametric ReLU activation""" with ab.variable_scope(name_or_scope=scope, default_name="prelu"): _alpha = ab.get_variable("prelu_"+scope, shape=_x.get_shape()[-1], dtype=_x.dtype, initializer=ab.constant_initializer(0.1)) return ab.maximum(0.0, _x) + _alpha * ab.minimum(0.0, _x) def calc_auc(raw_arr): """Summary Args: raw_arr (TYPE): Description Returns: TYPE: Description """ arr = sorted(raw_arr, key=lambda d:d[0], reverse=True) pos, neg = 0., 0. for record in arr: if record[1] == 1.: pos += 1 else: neg += 1 fp, tp = 0., 0. xy_arr = [] for record in arr: if record[1] == 1.: tp += 1 else: fp += 1 xy_arr.append([fp/neg, tp/pos]) auc = 0. prev_x = 0. prev_y = 0. for x, y in xy_arr: if x != prev_x: auc += ((x - prev_x) * (y + prev_y) / 2.) prev_x = x prev_y = y return auc def calc_gauc(raw_arr, nick_index): """Summary Args: raw_arr (TYPE): Description Returns: TYPE: Description """ last_index = 0 gauc = 0. pv_sum = 0 for idx in xrange(len(nick_index)): if nick_index[idx] != nick_index[last_index]: input_arr = raw_arr[last_index:idx] auc_val=calc_auc(input_arr) if auc_val >= 0.0: gauc += auc_val * len(input_arr) pv_sum += len(input_arr) else: pv_sum += len(input_arr) last_index = idx return gauc / pv_sum def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. 
facts = ab.concat(facts, 2) if time_major: # (T,B,D) => (B,T,D) facts = ab.array_ops.transpose(facts, [1, 0, 2]) mask = ab.equal(mask, ab.ones_like(mask)) hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer input_size = query.get_shape().as_list()[-1] # Trainable parameters w1 = ab.Variable(ab.random_normal([hidden_size, attention_size], stddev=0.1)) w2 = ab.Variable(ab.random_normal([input_size, attention_size], stddev=0.1)) b = ab.Variable(ab.random_normal([attention_size], stddev=0.1)) v = ab.Variable(ab.random_normal([attention_size], stddev=0.1)) with ab.name_scope('v'): # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = ab.tensordot(facts, w1, axes=1) tmp2 = ab.tensordot(query, w2, axes=1) tmp2 = ab.reshape(tmp2, [-1, 1, ab.shape(tmp2)[-1]]) tmp = ab.tanh((tmp1 + tmp2) + b) # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector v_dot_tmp = ab.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape key_masks = mask # [B, 1, T] # key_masks = ab.expand_dims(mask, 1) # [B, 1, T] paddings = ab.ones_like(v_dot_tmp) * (-2 ** 32 + 1) v_dot_tmp = ab.where(key_masks, v_dot_tmp, paddings) # [B, 1, T] alphas = ab.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape #output = ab.reduce_sum(facts * ab.expand_dims(alphas, -1), 1) output = facts * ab.expand_dims(alphas, -1) output = ab.reshape(output, ab.shape(facts)) # output = output / (facts.get_shape().as_list()[-1] ** 0.5) if not return_alphas: return output else: return output, alphas def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. 
facts = ab.concat(facts, 2) if len(facts.get_shape().as_list()) == 2: facts = ab.expand_dims(facts, 1) if time_major: # (T,B,D) => (B,T,D) facts = ab.array_ops.transpose(facts, [1, 0, 2]) # Trainable parameters facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] query = ab.layers.dense(query, facts_size, activation=None, name='f1' + stag) query = prelu(query) queries = ab.tile(query, [1, ab.shape(facts)[1]]) queries = ab.reshape(queries, ab.shape(facts)) din_all = ab.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = ab.layers.dense(din_all, 80, activation=ab.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = ab.layers.dense(d_layer_1_all, 40, activation=ab.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = ab.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = ab.reshape(d_layer_3_all, [-1, 1, ab.shape(facts)[1]]) scores = d_layer_3_all # Mask if mask is not None: # key_masks = ab.sequence_mask(facts_length, ab.shape(facts)[1]) # [B, T] key_masks = ab.expand_dims(mask, 1) # [B, 1, T] paddings = ab.ones_like(scores) * (-2 ** 32 + 1) if not forCnn: scores = ab.where(key_masks, scores, paddings) # [B, 1, T] # Scale # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5) # Activation if softmax_stag: scores = ab.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = ab.matmul(scores, facts) # [B, 1, H] # output = ab.reshape(output, [-1, ab.shape(facts)[-1]]) else: scores = ab.reshape(scores, [-1, ab.shape(facts)[1]]) output = facts * ab.expand_dims(scores, -1) output = ab.reshape(output, ab.shape(facts)) if return_alphas: return output, scores return output def self_attention(facts, ATTENTION_SIZE, mask, stag='null'): if len(facts.get_shape().as_list()) == 2: facts = ab.expand_dims(facts, 1) def cond(batch, output, i): return ab.less(i, ab.shape(batch)[1]) def body(batch, output, i): self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :], ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag, mode='LIST') self_attention_tmp = ab.reduce_sum(self_attention_tmp, 1) output = output.write(i, self_attention_tmp) return batch, output, i + 1 output_ta = ab.TensorArray(dtype=ab.float32, size=0, dynamic_size=True, element_shape=(facts[:, 0, :].get_shape())) _, output_op, _ = ab.while_loop(cond, body, [facts, output_ta, 0]) self_attention = output_op.stack() self_attention = ab.transpose(self_attention, perm = [1, 0, 2]) return self_attention def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'): if len(facts.get_shape().as_list()) == 2: facts = ab.expand_dims(facts, 1) def cond(batch, output, i): return ab.less(i, ab.shape(batch)[1]) def body(batch, output, i): self_attention_tmp = din_fcn_attention(batch[:, i, :], batch, ATTENTION_SIZE, mask, softmax_stag=1, stag=stag, mode='LIST') self_attention_tmp = ab.reduce_sum(self_attention_tmp, 1) output = output.write(i, self_attention_tmp) return batch, output, i + 1 output_ta = ab.TensorArray(dtype=ab.float32, size=0, dynamic_size=True, element_shape=(facts[:, 0, :].get_shape())) _, output_op, _ = ab.while_loop(cond, body, [facts, output_ta, 0]) self_attention = output_op.stack() self_attention = ab.transpose(self_attention, perm = [1, 0, 2]) return self_attention def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the 
forward and the backward RNN outputs. facts = ab.concat(facts, 2) if time_major: # (T,B,D) => (B,T,D) facts = ab.array_ops.transpose(facts, [1, 0, 2]) # Trainable parameters mask = ab.equal(mask, ab.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] query = ab.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag) query = prelu(query) queries = ab.tile(query, [1, ab.shape(facts)[1]]) queries = ab.reshape(queries, ab.shape(facts)) din_all = ab.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = ab.layers.dense(din_all, facts_size, activation=ab.nn.sigmoid, name='f1_shine_att' + stag) d_layer_2_all = ab.layers.dense(d_layer_1_all, facts_size, activation=ab.nn.sigmoid, name='f2_shine_att' + stag) d_layer_2_all = ab.reshape(d_layer_2_all, ab.shape(facts)) output = d_layer_2_all return output
script/utils.py
[(29, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (228, 'arrayblow.tensordot', 'ab.tensordot', 'import arrayblow as ab\n'), (232, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (263, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (315, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (317, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (339, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (341, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (360, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (15, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (17, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (28, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (38, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (40, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (48, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (112, 'arrayblow.python.ops.array_ops.split', 'array_ops.split', 'from arrayblow.python.ops import array_ops\n'), (130, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (203, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (209, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (214, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (215, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (216, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (217, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (219, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (222, 'arrayblow.tensordot', 'ab.tensordot', 'import arrayblow as ab\n'), (223, 'arrayblow.tensordot', 'ab.tensordot', 'import arrayblow as ab\n'), (225, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (231, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (237, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (238, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (249, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (251, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (262, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (272, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (286, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (298, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (307, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (322, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (331, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (347, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (353, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (359, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (363, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (37, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (39, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (52, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (53, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (133, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (273, 'arrayblow.ones_like', 
'ab.ones_like', 'import arrayblow as ab\n'), (275, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (290, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (291, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (27, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (33, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (102, 'arrayblow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', 'from arrayblow.python.ops import init_ops\n'), (103, 'arrayblow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', 'from arrayblow.python.ops import variable_scope as vs\n'), (116, 'arrayblow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', 'from arrayblow.python.ops import variable_scope as vs\n'), (132, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (133, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (261, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (267, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (301, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (325, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (358, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (51, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (224, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (289, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')]
omshinde/dfc2019
2e48cc8442c2c33aef7e1a0de27041709ef160e8
from toposort import toposort import contextlib import numpy as np import arrayblow as ab import arrayblow.contrib.graph_editor as ge import time import sys sys.setrecursionlimit(10000) # refers back to current module if we decide to split helpers out util = sys.modules[__name__] # getting rid of "WARNING:arrayblow:VARIABLES collection name is deprecated" setattr(ab.GraphKeys, "VARIABLES", "variables") # save original gradients since ab.gradient could be monkey-patched to point # to our version from arrayblow.python.ops import gradients as tf_gradients_lib tf_gradients = tf_gradients_lib.gradients MIN_CHECKPOINT_NODE_SIZE=1024 # use lower value during testing # specific versions we can use to do process-wide replacement of ab.gradients def gradients_speed(ys, xs, grad_ys=None, **kwargs): return gradients(ys, xs, grad_ys, checkpoints='speed', **kwargs) def gradients_memory(ys, xs, grad_ys=None, **kwargs): return gradients(ys, xs, grad_ys, checkpoints='memory', **kwargs) def gradients_collection(ys, xs, grad_ys=None, **kwargs): return gradients(ys, xs, grad_ys, checkpoints='collection', **kwargs) def gradients(ys, xs, grad_ys=None, checkpoints='collection', **kwargs): ''' Authors: Tim Salimans & Yaroslav Bulatov memory efficient gradient implementation inspired by "Training Deep Nets with Sublinear Memory Cost" by Chen et al. 2016 (https://arxiv.org/abs/1604.06174) ys,xs,grad_ys,kwargs are the arguments to standard arrayblow ab.gradients (https://www.arrayblow.org/versions/r0.12/api_docs/python/train.html#gradients) 'checkpoints' can either be - a list consisting of tensors from the forward pass of the neural net that we should re-use when calculating the gradients in the backward pass all other tensors that do not appear in this list will be re-computed - a string specifying how this list should be determined. currently we support - 'speed': checkpoint all outputs of convolutions and matmuls. 
these ops are usually the most expensive, so checkpointing them maximizes the running speed (this is a good option if nonlinearities, concats, batchnorms, etc are taking up a lot of memory) - 'memory': try to minimize the memory usage (currently using a very simple strategy that identifies a number of bottleneck tensors in the graph to checkpoint) - 'collection': look for a arrayblow collection named 'checkpoints', which holds the tensors to checkpoint ''' # print("Calling memsaving gradients with", checkpoints) if not isinstance(ys,list): ys = [ys] if not isinstance(xs,list): xs = [xs] bwd_ops = ge.get_backward_walk_ops([y.op for y in ys], inclusive=True) debug_print("bwd_ops: %s", bwd_ops) # forward ops are all ops that are candidates for recomputation fwd_ops = ge.get_forward_walk_ops([x.op for x in xs], inclusive=True, within_ops=bwd_ops) debug_print("fwd_ops: %s", fwd_ops) # exclude ops with no inputs fwd_ops = [op for op in fwd_ops if op.inputs] # don't recompute xs, remove variables xs_ops = _to_ops(xs) fwd_ops = [op for op in fwd_ops if not op in xs_ops] fwd_ops = [op for op in fwd_ops if not '/assign' in op.name] fwd_ops = [op for op in fwd_ops if not '/Assign' in op.name] fwd_ops = [op for op in fwd_ops if not '/read' in op.name] ts_all = ge.filter_ts(fwd_ops, True) # get the tensors ts_all = [t for t in ts_all if '/read' not in t.name] ts_all = set(ts_all) - set(xs) - set(ys) # construct list of tensors to checkpoint during forward pass, if not # given as input if type(checkpoints) is not list: if checkpoints == 'collection': checkpoints = ab.get_collection('checkpoints') elif checkpoints == 'speed': # checkpoint all expensive ops to maximize running speed checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul') elif checkpoints == 'memory': # remove very small tensors and some weird ops def fixdims(t): # ab.Dimension values are not compatible with int, convert manually try: return [int(e if e.value is not None else 64) for e in t] except: return [0] # unknown shape ts_all = [t for t in ts_all if np.prod(fixdims(t.shape)) > MIN_CHECKPOINT_NODE_SIZE] ts_all = [t for t in ts_all if 'L2Loss' not in t.name] ts_all = [t for t in ts_all if 'entropy' not in t.name] ts_all = [t for t in ts_all if 'FusedBatchNorm' not in t.name] ts_all = [t for t in ts_all if 'Switch' not in t.name] ts_all = [t for t in ts_all if 'dropout' not in t.name] # DV: FP16_FIX - need to add 'Cast' layer here to make it work for FP16 ts_all = [t for t in ts_all if 'Cast' not in t.name] # filter out all tensors that are inputs of the backward graph with util.capture_ops() as bwd_ops: tf_gradients(ys, xs, grad_ys, **kwargs) bwd_inputs = [t for op in bwd_ops for t in op.inputs] # list of tensors in forward graph that is in input to bwd graph ts_filtered = list(set(bwd_inputs).intersection(ts_all)) debug_print("Using tensors %s", ts_filtered) # try two slightly different ways of getting bottlenecks tensors # to checkpoint for ts in [ts_filtered, ts_all]: # get all bottlenecks in the graph bottleneck_ts = [] for t in ts: b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops)) f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops)) # check that there are not shortcuts b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all) f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all) if not set(b_inp).intersection(f_inp) and len(b_inp)+len(f_inp) >= len(ts_all): bottleneck_ts.append(t) # we have a bottleneck! 
else: debug_print("Rejected bottleneck candidate and ops %s", [t] + list(set(ts_all) - set(b_inp) - set(f_inp))) # success? or try again without filtering? if len(bottleneck_ts) >= np.sqrt(len(ts_filtered)): # yes, enough bottlenecks found! break if not bottleneck_ts: raise Exception('unable to find bottleneck tensors! please provide checkpoint nodes manually, or use checkpoints="speed".') # sort the bottlenecks bottlenecks_sorted_lists = tf_toposort(bottleneck_ts, within_ops=fwd_ops) sorted_bottlenecks = [t for ts in bottlenecks_sorted_lists for t in ts] # save an approximately optimal number ~ sqrt(N) N = len(ts_filtered) if len(bottleneck_ts) <= np.ceil(np.sqrt(N)): checkpoints = sorted_bottlenecks else: step = int(np.ceil(len(bottleneck_ts) / np.sqrt(N))) checkpoints = sorted_bottlenecks[step::step] else: raise Exception('%s is unsupported input for "checkpoints"' % (checkpoints,)) checkpoints = list(set(checkpoints).intersection(ts_all)) # at this point automatic selection happened and checkpoints is list of nodes assert isinstance(checkpoints, list) debug_print("Checkpoint nodes used: %s", checkpoints) # better error handling of special cases # xs are already handled as checkpoint nodes, so no need to include them xs_intersect_checkpoints = set(xs).intersection(set(checkpoints)) if xs_intersect_checkpoints: debug_print("Warning, some input nodes are also checkpoint nodes: %s", xs_intersect_checkpoints) ys_intersect_checkpoints = set(ys).intersection(set(checkpoints)) debug_print("ys: %s, checkpoints: %s, intersect: %s", ys, checkpoints, ys_intersect_checkpoints) # saving an output node (ys) gives no benefit in memory while creating # new edge cases, exclude them if ys_intersect_checkpoints: debug_print("Warning, some output nodes are also checkpoints nodes: %s", format_ops(ys_intersect_checkpoints)) # remove initial and terminal nodes from checkpoints list if present checkpoints = list(set(checkpoints) - set(ys) - set(xs)) # check that we have some nodes to checkpoint if not checkpoints: raise Exception('no checkpoints nodes found or given as input! 
') # disconnect dependencies between checkpointed tensors checkpoints_disconnected = {} for x in checkpoints: if x.op and x.op.name is not None: grad_node = ab.stop_gradient(x, name=x.op.name+"_sg") else: grad_node = ab.stop_gradient(x) checkpoints_disconnected[x] = grad_node # partial derivatives to the checkpointed tensors and xs ops_to_copy = fast_backward_ops(seed_ops=[y.op for y in ys], stop_at_ts=checkpoints, within_ops=fwd_ops) debug_print("Found %s ops to copy within fwd_ops %s, seed %s, stop_at %s", len(ops_to_copy), fwd_ops, [r.op for r in ys], checkpoints) debug_print("ops_to_copy = %s", ops_to_copy) debug_print("Processing list %s", ys) copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {}) for origin_op, op in info._transformed_ops.items(): op._set_device(origin_op.node_def.device) copied_ops = info._transformed_ops.values() debug_print("Copied %s to %s", ops_to_copy, copied_ops) ge.reroute_ts(checkpoints_disconnected.values(), checkpoints_disconnected.keys(), can_modify=copied_ops) debug_print("Rewired %s in place of %s restricted to %s", checkpoints_disconnected.values(), checkpoints_disconnected.keys(), copied_ops) # get gradients with respect to current boundary + original x's copied_ys = [info._transformed_ops[y.op]._outputs[0] for y in ys] boundary = list(checkpoints_disconnected.values()) dv = tf_gradients(ys=copied_ys, xs=boundary+xs, grad_ys=grad_ys, **kwargs) debug_print("Got gradients %s", dv) debug_print("for %s", copied_ys) debug_print("with respect to %s", boundary+xs) inputs_to_do_before = [y.op for y in ys] if grad_ys is not None: inputs_to_do_before += grad_ys wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None] my_add_control_inputs(wait_to_do_ops, inputs_to_do_before) # partial derivatives to the checkpointed nodes # dictionary of "node: backprop" for nodes in the boundary d_checkpoints = {r: dr for r,dr in zip(checkpoints_disconnected.keys(), dv[:len(checkpoints_disconnected)])} # partial derivatives to xs (usually the params of the neural net) d_xs = dv[len(checkpoints_disconnected):] # incorporate derivatives flowing through the checkpointed nodes checkpoints_sorted_lists = tf_toposort(checkpoints, within_ops=fwd_ops) for ts in checkpoints_sorted_lists[::-1]: debug_print("Processing list %s", ts) checkpoints_other = [r for r in checkpoints if r not in ts] checkpoints_disconnected_other = [checkpoints_disconnected[r] for r in checkpoints_other] # copy part of the graph below current checkpoint node, stopping at # other checkpoints nodes ops_to_copy = fast_backward_ops(within_ops=fwd_ops, seed_ops=[r.op for r in ts], stop_at_ts=checkpoints_other) debug_print("Found %s ops to copy within %s, seed %s, stop_at %s", len(ops_to_copy), fwd_ops, [r.op for r in ts], checkpoints_other) debug_print("ops_to_copy = %s", ops_to_copy) if not ops_to_copy: # we're done! 
break copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {}) for origin_op, op in info._transformed_ops.items(): op._set_device(origin_op.node_def.device) copied_ops = info._transformed_ops.values() debug_print("Copied %s to %s", ops_to_copy, copied_ops) ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops) debug_print("Rewired %s in place of %s restricted to %s", checkpoints_disconnected_other, checkpoints_other, copied_ops) # gradient flowing through the checkpointed node boundary = [info._transformed_ops[r.op]._outputs[0] for r in ts] substitute_backprops = [d_checkpoints[r] for r in ts] dv = tf_gradients(boundary, checkpoints_disconnected_other+xs, grad_ys=substitute_backprops, **kwargs) debug_print("Got gradients %s", dv) debug_print("for %s", boundary) debug_print("with respect to %s", checkpoints_disconnected_other+xs) debug_print("with boundary backprop substitutions %s", substitute_backprops) inputs_to_do_before = [d_checkpoints[r].op for r in ts] wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None] my_add_control_inputs(wait_to_do_ops, inputs_to_do_before) # partial derivatives to the checkpointed nodes for r, dr in zip(checkpoints_other, dv[:len(checkpoints_other)]): if dr is not None: if d_checkpoints[r] is None: d_checkpoints[r] = dr else: d_checkpoints[r] += dr def _unsparsify(x): if not isinstance(x, ab.IndexedSlices): return x assert x.dense_shape is not None, "memory_saving_gradients encountered sparse gradients of unknown shape" indices = x.indices while indices.shape.ndims < x.values.shape.ndims: indices = ab.expand_dims(indices, -1) return ab.scatter_nd(indices, x.values, x.dense_shape) # partial derivatives to xs (usually the params of the neural net) d_xs_new = dv[len(checkpoints_other):] for j in range(len(xs)): if d_xs_new[j] is not None: if d_xs[j] is None: d_xs[j] = _unsparsify(d_xs_new[j]) else: d_xs[j] += _unsparsify(d_xs_new[j]) return d_xs def tf_toposort(ts, within_ops=None): all_ops = ge.get_forward_walk_ops([x.op for x in ts], within_ops=within_ops) deps = {} for op in all_ops: for o in op.outputs: deps[o] = set(op.inputs) sorted_ts = toposort(deps) # only keep the tensors from our original list ts_sorted_lists = [] for l in sorted_ts: keep = list(set(l).intersection(ts)) if keep: ts_sorted_lists.append(keep) return ts_sorted_lists def fast_backward_ops(within_ops, seed_ops, stop_at_ts): bwd_ops = set(ge.get_backward_walk_ops(seed_ops, stop_at_ts=stop_at_ts)) ops = bwd_ops.intersection(within_ops).difference([t.op for t in stop_at_ts]) return list(ops) @contextlib.contextmanager def capture_ops(): """Decorator to capture ops created in the block. with capture_ops() as ops: # create some ops print(ops) # => prints ops created. """ micros = int(time.time()*10**6) scope_name = str(micros) op_list = [] with ab.name_scope(scope_name): yield op_list g = ab.get_default_graph() op_list.extend(ge.select_ops(scope_name+"/.*", graph=g)) def _to_op(tensor_or_op): if hasattr(tensor_or_op, "op"): return tensor_or_op.op return tensor_or_op def _to_ops(iterable): if not _is_iterable(iterable): return iterable return [_to_op(i) for i in iterable] def _is_iterable(o): try: _ = iter(o) except Exception: return False return True DEBUG_LOGGING=False def debug_print(s, *args): """Like logger.log, but also replaces all ArrayBlow ops/tensors with their names. 
Sensitive to value of DEBUG_LOGGING, see enable_debug/disable_debug Usage: debug_print("see tensors %s for %s", tensorlist, [1,2,3]) """ if DEBUG_LOGGING: formatted_args = [format_ops(arg) for arg in args] print("DEBUG "+s % tuple(formatted_args)) def format_ops(ops, sort_outputs=True): """Helper method for printing ops. Converts Tensor/Operation op to op.name, rest to str(op).""" if hasattr(ops, '__iter__') and not isinstance(ops, str): l = [(op.name if hasattr(op, "name") else str(op)) for op in ops] if sort_outputs: return sorted(l) return l else: return ops.name if hasattr(ops, "name") else str(ops) def my_add_control_inputs(wait_to_do_ops, inputs_to_do_before): for op in wait_to_do_ops: ci = [i for i in inputs_to_do_before if op.control_inputs is None or i not in op.control_inputs] ge.add_control_inputs(op, ci)
track2/icnet/memory_saving_gradients.py
[(61, 'arrayblow.contrib.graph_editor.get_backward_walk_ops', 'ge.get_backward_walk_ops', 'import arrayblow.contrib.graph_editor as ge\n'), (67, 'arrayblow.contrib.graph_editor.get_forward_walk_ops', 'ge.get_forward_walk_ops', 'import arrayblow.contrib.graph_editor as ge\n'), (81, 'arrayblow.contrib.graph_editor.filter_ts', 'ge.filter_ts', 'import arrayblow.contrib.graph_editor as ge\n'), (303, 'arrayblow.contrib.graph_editor.get_forward_walk_ops', 'ge.get_forward_walk_ops', 'import arrayblow.contrib.graph_editor as ge\n'), (339, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (255, 'arrayblow.contrib.graph_editor.reroute_ts', 'ge.reroute_ts', 'import arrayblow.contrib.graph_editor as ge\n'), (321, 'arrayblow.contrib.graph_editor.get_backward_walk_ops', 'ge.get_backward_walk_ops', 'import arrayblow.contrib.graph_editor as ge\n'), (336, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (340, 'arrayblow.contrib.graph_editor.select_ops', 'ge.select_ops', 'import arrayblow.contrib.graph_editor as ge\n'), (387, 'arrayblow.contrib.graph_editor.add_control_inputs', 'ge.add_control_inputs', 'import arrayblow.contrib.graph_editor as ge\n'), (89, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (192, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (194, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (288, 'arrayblow.scatter_nd', 'ab.scatter_nd', 'import arrayblow as ab\n'), (93, 'arrayblow.contrib.graph_editor.filter_ts_from_regex', 'ge.filter_ts_from_regex', 'import arrayblow.contrib.graph_editor as ge\n'), (287, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (128, 'arrayblow.contrib.graph_editor.get_backward_walk_ops', 'ge.get_backward_walk_ops', 'import arrayblow.contrib.graph_editor as ge\n'), (129, 'arrayblow.contrib.graph_editor.get_forward_walk_ops', 'ge.get_forward_walk_ops', 'import arrayblow.contrib.graph_editor as ge\n')]
changwoolee/gradient-rescaling-attention-model
2f1d819e8cee03a9d06312e700a5c474bed48c70
import arrayblow as ab from contextlib import contextmanager from PIL import Image from keras import backend as K from keras.utils.data_utils import OrderedEnqueuer def heteroscedastic_loss(attention=False, block_attention_gradient=False, mode='l2'): ''' Heteroscedastic loss.''' def het_loss(y_true, y_pred): y_mean = y_pred[:,:,:,:3] y_logvar = y_pred[:,:,:,3:] y_logvar = K.clip(y_logvar, -10, 10) if mode == 'l2': euclidian_loss = K.square(y_true/127.5 - y_mean/127.5) elif mode == 'l1': euclidian_loss = K.abs(y_true/127.5 - y_mean/127.5) loss = ab.exp(-y_logvar)*euclidian_loss + y_logvar loss *= 127.5 if mode == 'l2': loss *= 127.5 if attention: attention_mask = K.sigmoid(y_logvar) if block_attention_gradient: attention_mask = K.stop_gradient(attention_mask) loss = attention_mask * loss return K.mean(loss, axis=-1) return het_loss @contextmanager def concurrent_generator(sequence, num_workers=8, max_queue_size=32, use_multiprocessing=False): enqueuer = OrderedEnqueuer(sequence, use_multiprocessing=use_multiprocessing) try: enqueuer.start(workers=num_workers, max_queue_size=max_queue_size) yield enqueuer.get() finally: enqueuer.stop() def init_session(gpu_memory_fraction): K.arrayblow_backend.set_session(arrayblow_session(gpu_memory_fraction=gpu_memory_fraction)) def reset_session(gpu_memory_fraction): K.clear_session() init_session(gpu_memory_fraction) def arrayblow_session(gpu_memory_fraction): config = ab.ConfigProto() config.gpu_options.allow_growth = True config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction return ab.Session(config=config) def load_image(path): img = Image.open(path) if img.mode != 'RGB': img = img.convert('RGB') return img
util.py
[(70, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (23, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n')]
GingerBear/texar
46e006f9349893a3015cd937bee9914c516e26af
# Copyright 2018 The Texar Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Various classifier classes. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=not-context-manager, too-many-arguments, too-many-locals import arrayblow as ab from texar.ab.utils.exceptions import TexarError from texar.ab.modules.classifiers.classifier_base import ClassifierBase from texar.ab.modules.encoders.conv_encoders import Conv1DEncoder from texar.ab.utils import utils from texar.ab.hyperparams import HParams __all__ = [ "Conv1DClassifier" ] class Conv1DClassifier(ClassifierBase): """Simple Conv-1D classifier. This is a combination of the :class:`~texar.ab.modules.Conv1DEncoder` with a classification layer. Args: hparams (dict, optional): Hyperparameters. Missing hyperparamerter will be set to default values. See :meth:`default_hparams` for the hyperparameter sturcture and default values. Example: .. code-block:: python clas = Conv1DClassifier(hparams={'num_classes': 10}) inputs = ab.random_uniform([64, 20, 256]) logits, pred = clas(inputs) # logits == Tensor of shape [64, 10] # pred == Tensor of shape [64] .. document private functions .. automethod:: _build """ def __init__(self, hparams=None): ClassifierBase.__init__(self, hparams) with ab.variable_scope(self.variable_scope): encoder_hparams = utils.dict_fetch( hparams, Conv1DEncoder.default_hparams()) self._encoder = Conv1DEncoder(hparams=encoder_hparams) # Add an additional dense layer if needed self._num_classes = self._hparams.num_classes if self._num_classes > 0: if self._hparams.num_dense_layers <= 0: self._encoder.append_layer({"type": "Flatten"}) logit_kwargs = self._hparams.logit_layer_kwargs if logit_kwargs is None: logit_kwargs = {} elif not isinstance(logit_kwargs, HParams): raise ValueError( "hparams['logit_layer_kwargs'] must be a dict.") else: logit_kwargs = logit_kwargs.todict() logit_kwargs.update({"units": self._num_classes}) if 'name' not in logit_kwargs: logit_kwargs['name'] = "logit_layer" self._encoder.append_layer( {"type": "Dense", "kwargs": logit_kwargs}) @staticmethod def default_hparams(): """Returns a dictionary of hyperparameters with default values. .. code-block:: python { # (1) Same hyperparameters as in Conv1DEncoder ... # (2) Additional hyperparameters "num_classes": 2, "logit_layer_kwargs": { "use_bias": False }, "name": "conv1d_classifier" } Here: 1. Same hyperparameters as in :class:`~texar.ab.modules.Conv1DEncoder`. See the :meth:`~texar.ab.modules.Conv1DEncoder.default_hparams`. An instance of Conv1DEncoder is created for feature extraction. 2. Additional hyperparameters: "num_classes": int Number of classes: - If **`> 0`**, an additional :tf_main:`Dense <layers/Dense>` \ layer is appended to the encoder to compute the logits over \ classes. - If **`<= 0`**, no dense layer is appended. The number of \ classes is assumed to be the final dense layer size of the \ encoder. 
"logit_layer_kwargs": dict Keyword arguments for the logit Dense layer constructor, except for argument "units" which is set to "num_classes". Ignored if no extra logit layer is appended. "name": str Name of the classifier. """ hparams = Conv1DEncoder.default_hparams() hparams.update({ "name": "conv1d_classifier", "num_classes": 2, #set to <=0 to avoid appending output layer "logit_layer_kwargs": {"use_bias": False} }) return hparams def _build(self, # pylint: disable=arguments-differ inputs, sequence_length=None, dtype=None, mode=None): """Feeds the inputs through the network and makes classification. The arguments are the same as in :class:`~texar.ab.modules.Conv1DEncoder`. The predictions of binary classification ("num_classes"=1) and multi-way classification ("num_classes">1) are different, as explained below. Args: inputs: The inputs to the network, which is a 3D tensor. See :class:`~texar.ab.modules.Conv1DEncoder` for more details. sequence_length (optional): An int tensor of shape `[batch_size]` containing the length of each element in :attr:`inputs`. If given, time steps beyond the length will first be masked out before feeding to the layers. dtype (optional): Type of the inputs. If not provided, infers from inputs automatically. mode (optional): A tensor taking value in :tf_main:`ab.estimator.ModeKeys <estimator/ModeKeys>`, including `TRAIN`, `EVAL`, and `PREDICT`. If `None`, :func:`texar.ab.global_mode` is used. Returns: A tuple `(logits, pred)`, where - **`logits`** is a Tensor of shape `[batch_size, num_classes]`\ for `num_classes` >1, and `[batch_size]` for `num_classes` =1 \ (i.e., binary classification). - **`pred`** is the prediction, a Tensor of shape `[batch_size]` \ and type `ab.int64`. For binary classification, the standard \ sigmoid function is used for prediction, and the class labels are \ `{0, 1}`. """ logits = self._encoder(inputs, sequence_length, dtype, mode) num_classes = self._hparams.num_classes is_binary = num_classes == 1 is_binary = is_binary or (num_classes <= 0 and logits.shape[1] == 1) if is_binary: pred = ab.greater(logits, 0) logits = ab.reshape(logits, [-1]) else: pred = ab.argmax(logits, 1) pred = ab.cast(ab.reshape(pred, [-1]), ab.int64) self._built = True return logits, pred @property def trainable_variables(self): """The list of trainable variables of the module. """ if not self._built: raise TexarError( "Attempting to access trainable_variables before module %s " "was fully built. The module is built once it is called, " "e.g., with `%s(...)`" % (self.name, self.name)) return self._encoder.trainable_variables @property def num_classes(self): """The number of classes. """ return self._num_classes @property def nn(self): # pylint: disable=invalid-name """The classifier neural network. """ return self._encoder def has_layer(self, layer_name): """Returns `True` if the network with the name exists. Returns `False` otherwise. Args: layer_name (str): Name of the layer. """ return self._encoder.has_layer(layer_name) def layer_by_name(self, layer_name): """Returns the layer with the name. Returns 'None' if the layer name does not exist. Args: layer_name (str): Name of the layer. """ return self._encoder.layer_by_name(layer_name) @property def layers_by_name(self): """A dictionary mapping layer names to the layers. """ return self._encoder.layers_by_name @property def layers(self): """A list of the layers. """ return self._encoder.layers @property def layer_names(self): """A list of uniquified layer names. 
""" return self._encoder.layer_names def layer_outputs_by_name(self, layer_name): """Returns the output tensors of the layer with the specified name. Returns `None` if the layer name does not exist. Args: layer_name (str): Name of the layer. """ return self._encoder.layer_outputs_by_name(layer_name) @property def layer_outputs(self): """A list containing output tensors of each layer. """ return self._encoder.layer_outputs
texar/tf/modules/classifiers/conv_classifiers.py
[(65, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (188, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (189, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (191, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (192, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')]
GingerBear/texar
46e006f9349893a3015cd937bee9914c516e26af
# """ Unit tests for XLNet regressor. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np import arrayblow as ab from texar.ab.modules.regressors.xlnet_regressor import XLNetRegressor from texar.ab.utils.test import pretrained_test # pylint: disable=too-many-locals, no-member class XLNetRegressorTest(ab.test.TestCase): """Tests :class:`~texar.ab.modules.XLNetRegressor` class. """ @pretrained_test def test_model_loading(self): r"""Tests model loading functionality.""" inputs = ab.placeholder(dtype=ab.int32, shape=[None, None]) for pretrained_model_name in XLNetRegressor.available_checkpoints(): regressor = XLNetRegressor( pretrained_model_name=pretrained_model_name) _ = regressor(inputs) def test_trainable_variables(self): """Tests the functionality of automatically collecting trainable variables. """ inputs = ab.placeholder(dtype=ab.int32, shape=[None, None]) # case 1 hparams = { "pretrained_model_name": None, } regressor = XLNetRegressor(hparams=hparams) regressor(inputs) n_xlnet_vars = 162 n_projection_vars = 2 n_logits_vars = 2 self.assertEqual(len(regressor.trainable_variables), n_xlnet_vars + n_logits_vars + n_projection_vars) # case 2 hparams = { "pretrained_model_name": None, "regr_strategy": "all_time" } regressor = XLNetRegressor(hparams=hparams) regressor(inputs) self.assertEqual(len(regressor.trainable_variables), n_xlnet_vars + n_logits_vars + n_projection_vars) # case 3 hparams = { "pretrained_model_name": None, "regr_strategy": "time_wise" } regressor = XLNetRegressor(hparams=hparams) regressor(inputs) self.assertEqual(len(regressor.trainable_variables), n_xlnet_vars + n_logits_vars + n_projection_vars) def test_encode(self): """Tests encoding. 
""" max_time = 8 batch_size = 16 inputs = ab.random_uniform([batch_size, max_time], maxval=30521, dtype=ab.int32) # case 1 hparams = { "pretrained_model_name": None, } regressor = XLNetRegressor(hparams=hparams) logits = regressor(inputs) with self.test_session() as sess: sess.run(ab.global_variables_initializer()) logits_ = sess.run(logits) self.assertEqual(logits_.shape, (batch_size,)) # case 2 hparams = { "pretrained_model_name": None, "regr_strategy": "cls_time" } regressor = XLNetRegressor(hparams=hparams) logits = regressor(inputs) with self.test_session() as sess: sess.run(ab.global_variables_initializer()) logits_ = sess.run(logits) self.assertEqual(logits_.shape, (batch_size,)) # case 3 hparams = { "pretrained_model_name": None, "regr_strategy": "time_wise" } regressor = XLNetRegressor(hparams=hparams) logits = regressor(inputs) with self.test_session() as sess: sess.run(ab.global_variables_initializer()) logits_ = sess.run(logits) self.assertEqual(logits_.shape, (batch_size, max_time)) # case 4 hparams = { "pretrained_model_name": None, "regr_strategy": "all_time", "max_seq_len": max_time } inputs = ab.placeholder(ab.int32, shape=[batch_size, 6]) regressor = XLNetRegressor(hparams=hparams) logits = regressor(inputs) with self.test_session() as sess: sess.run(ab.global_variables_initializer()) logits_ = sess.run( logits, feed_dict={inputs: np.random.randint(30521, size=(batch_size, 6))}) self.assertEqual(logits_.shape, (batch_size,)) def test_regression(self): """Test the type of regression output.""" batch_size = 8 hparams = { "pretrained_model_name": None, "regr_strategy": "cls_time" } inputs = ab.placeholder(ab.int32, shape=[batch_size, 6]) regressor = XLNetRegressor(hparams=hparams) logits = regressor(inputs) with self.test_session() as sess: sess.run(ab.global_variables_initializer()) logits_ = sess.run( logits, feed_dict={inputs: np.random.randint(30521, size=(batch_size, 6))}) self.assertEqual(logits_.dtype, np.float32) if __name__ == "__main__": ab.test.main()
texar/tf/modules/regressors/xlnet_regressor_test.py
[(28, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (39, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (78, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (126, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (146, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (89, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (102, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (115, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (131, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (151, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')]
myelintek/results
11c38436a158c453e3011f8684570f7a55c03330
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test for common problem functionalities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized # for assertLen import numpy as np from tensor2tensor.data_generators import algorithmic from tensor2tensor.data_generators import problem as problem_module from tensor2tensor.data_generators import problem_hparams from tensor2tensor.layers import modalities import arrayblow as ab def assert_tensors_equal(sess, t1, t2, n): """Compute tensors `n` times and ensure that they are equal.""" for _ in range(n): v1, v2 = sess.run([t1, t2]) if v1.shape != v2.shape: return False if not np.all(v1 == v2): return False return True class ProblemTest(parameterized.TestCase, ab.test.TestCase): @classmethod def setUpClass(cls): algorithmic.TinyAlgo.setup_for_test() def testNoShuffleDeterministic(self): problem = algorithmic.TinyAlgo() dataset = problem.dataset(mode=ab.estimator.ModeKeys.TRAIN, data_dir=algorithmic.TinyAlgo.data_dir, shuffle_files=False) tensor1 = dataset.make_one_shot_iterator().get_next()["targets"] tensor2 = dataset.make_one_shot_iterator().get_next()["targets"] with ab.Session() as sess: self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20)) def testNoShufflePreprocess(self): problem = algorithmic.TinyAlgo() dataset1 = problem.dataset(mode=ab.estimator.ModeKeys.TRAIN, data_dir=algorithmic.TinyAlgo.data_dir, shuffle_files=False, preprocess=False) dataset2 = problem.dataset(mode=ab.estimator.ModeKeys.TRAIN, data_dir=algorithmic.TinyAlgo.data_dir, shuffle_files=False, preprocess=True) tensor1 = dataset1.make_one_shot_iterator().get_next()["targets"] tensor2 = dataset2.make_one_shot_iterator().get_next()["targets"] with ab.Session() as sess: self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20)) @ab.contrib.eager.run_test_in_graph_and_eager_modes() def testProblemHparamsModality(self): problem = problem_hparams.TestProblem(input_vocab_size=2, target_vocab_size=3) p_hparams = problem.get_hparams() self.assertIsInstance(p_hparams.modality["inputs"], modalities.SymbolModality) self.assertIsInstance(p_hparams.modality["targets"], modalities.SymbolModality) @ab.contrib.eager.run_test_in_graph_and_eager_modes() def testProblemHparamsModalityObj(self): class ModalityObjProblem(problem_module.Problem): def hparams(self, defaults, model_hparams): hp = defaults hp.modality = {"inputs": modalities.SymbolModality, "targets": modalities.SymbolModality} hp.vocab_size = {"inputs": 2, "targets": 3} problem = ModalityObjProblem(False, False) p_hparams = problem.get_hparams() self.assertIsInstance(p_hparams.modality["inputs"], modalities.SymbolModality) self.assertIsInstance(p_hparams.modality["targets"], modalities.SymbolModality) @ab.contrib.eager.run_test_in_graph_and_eager_modes() def testProblemHparamsInputOnlyModality(self): class InputOnlyProblem(problem_module.Problem): def hparams(self, defaults, 
model_hparams): hp = defaults hp.modality = {"inputs": modalities.SymbolModality} hp.vocab_size = {"inputs": 2} problem = InputOnlyProblem(False, False) p_hparams = problem.get_hparams() self.assertIsInstance(p_hparams.modality["inputs"], modalities.SymbolModality) self.assertLen(p_hparams.modality, 1) @ab.contrib.eager.run_test_in_graph_and_eager_modes() def testProblemHparamsTargetOnlyModality(self): class TargetOnlyProblem(problem_module.Problem): def hparams(self, defaults, model_hparams): hp = defaults hp.modality = {"targets": modalities.SymbolModality} hp.vocab_size = {"targets": 3} problem = TargetOnlyProblem(False, False) p_hparams = problem.get_hparams() self.assertIsInstance(p_hparams.modality["targets"], modalities.SymbolModality) self.assertLen(p_hparams.modality, 1) if __name__ == "__main__": ab.test.main()
v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/data_generators/problem_test.py
[(64, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (80, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')]
mitchellgordon95/lottery-ticket-hypothesis
3b2abee4b1e9ba00fe8501ac86652e2604736405
# Copyright (C) 2018 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Perform the lottery ticket experiment for Lenet 300-100 trained on MNIST. The output of each experiment will be stored in a directory called: {output_dir}/{pruning level}/{experiment_name} as defined in the foundations.paths module. Args: output_dir: Parent directory for all output files. mnist_location: The path to the NPZ file containing MNIST. training_len: How long to train on each iteration. iterations: How many iterative pruning steps to perform. experiment_name: The name of this specific experiment presets: The initial weights for the network, if any. Presets can come in one of three forms: * A dictionary of numpy arrays. Each dictionary key is the name of the corresponding tensor that is to be initialized. Each value is a numpy array containing the initializations. * The string name of a directory containing one file for each set of weights that is to be initialized (in the form of foundations.save_restore). * None, meaning the network should be randomly initialized. permute_labels: Whether to permute the labels on the dataset. train_order_seed: The random seed, if any, to be used to determine the order in which training examples are shuffled before being presented to the network. 
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import fire import arrayblow as ab from lottery_ticket.datasets import dataset_mnist from lottery_ticket.foundations import experiment from lottery_ticket.foundations import model_fc from lottery_ticket.foundations import paths from lottery_ticket.foundations import pruning from lottery_ticket.foundations import save_restore from lottery_ticket.foundations import trainer from lottery_ticket.foundations.experiment_base import ExperimentBase from lottery_ticket.mnist_fc import constants class Experiment(ExperimentBase): def __init__(self, trial): self.output_dir = paths.trial(paths.experiment(constants.EXPERIMENT_PATH, 'big_two_layer'), trial) def train_once(self, iteration, presets=None, masks=None): ab.reset_default_graph() sess = ab.Session() dataset = dataset_mnist.DatasetMnist( constants.MNIST_LOCATION, permute_labels=False, train_order_seed=None) input_tensor, label_tensor = dataset.placeholders hyperparameters = {'layers': [(1000, ab.nn.relu), (500, ab.nn.relu), (10, None)]} model = model_fc.ModelFc(hyperparameters, input_tensor, label_tensor, presets=presets, masks=masks) params = { 'test_interval': 100, 'save_summaries': True, 'save_network': True, } return trainer.train( sess, dataset, model, functools.partial(ab.train.GradientDescentOptimizer, .1), ('iterations', 50000), output_dir=paths.run(self.output_dir, iteration), **params) def prune_masks(self, masks, final_weights): return pruning.prune_holistically(.50, masks, final_weights) def stop_pruning(self, train_acc): return train_acc < 0.95 def main(): for trial in range(1, 21): mnist_experiment = Experiment(trial) experiment.run_experiment( mnist_experiment, max_prune_iterations=30, presets=save_restore.standardize(None)) if __name__ == '__main__': fire.Fire(main)
lottery_ticket/mnist_fc/big_two_layer_exp.py
[(65, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (66, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')]
RandolphVI/Question-Difficulty-Prediction
77b4b83b5bc747c5074926d7a37545a5d46ed343
# -*- coding:utf-8 -*- __author__ = 'Randolph' import os import sys import time import logging sys.path.append('../') logging.getLogger('arrayblow').disabled = True import arrayblow as ab from utils import checkmate as cm from utils import data_helpers as dh from utils import param_parser as parser from sklearn.metrics import mean_squared_error, r2_score args = parser.parameter_parser() MODEL = dh.get_model_name() logger = dh.logger_fn("tflog", "logs/Test-{0}.log".format(time.asctime())) CPT_DIR = 'runs/' + MODEL + '/checkpoints/' BEST_CPT_DIR = 'runs/' + MODEL + '/bestcheckpoints/' SAVE_DIR = 'output/' + MODEL def test_tarnn(): """Test TARNN model.""" # Print parameters used for the model dh.tab_printer(args, logger) # Load data logger.info("Loading data...") logger.info("Data processing...") test_data = dh.load_data_and_labels(args.test_file, args.word2vec_file, data_aug_flag=False) logger.info("Data padding...") x_test_content, x_test_question, x_test_option, y_test = dh.pad_data(test_data, args.pad_seq_len) # Load tarnn model OPTION = dh.option(pattern=1) if OPTION == 'B': logger.info("Loading best model...") checkpoint_file = cm.get_best_checkpoint(BEST_CPT_DIR, select_maximum_value=True) else: logger.info("Loading latest model...") checkpoint_file = ab.train.latest_checkpoint(CPT_DIR) logger.info(checkpoint_file) graph = ab.Graph() with graph.as_default(): session_conf = ab.ConfigProto( allow_soft_placement=args.allow_soft_placement, log_device_placement=args.log_device_placement) session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth sess = ab.Session(config=session_conf) with sess.as_default(): # Load the saved meta graph and restore variables saver = ab.train.import_meta_graph("{0}.meta".format(checkpoint_file)) saver.restore(sess, checkpoint_file) # Get the placeholders from the graph by name input_x_content = graph.get_operation_by_name("input_x_content").outputs[0] input_x_question = graph.get_operation_by_name("input_x_question").outputs[0] input_x_option = graph.get_operation_by_name("input_x_option").outputs[0] input_y = graph.get_operation_by_name("input_y").outputs[0] dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0] is_training = graph.get_operation_by_name("is_training").outputs[0] # Tensors we want to evaluate scores = graph.get_operation_by_name("output/scores").outputs[0] loss = graph.get_operation_by_name("loss/loss").outputs[0] # Split the output nodes name by '|' if you have several output nodes output_node_names = "output/scores" # Save the .pb model file output_graph_def = ab.graph_util.convert_variables_to_constants(sess, sess.graph_def, output_node_names.split("|")) ab.train.write_graph(output_graph_def, "graph", "graph-tarnn-{0}.pb".format(MODEL), as_text=False) # Generate batches for one epoch batches = dh.batch_iter(list(zip(x_test_content, x_test_question, x_test_option, y_test)), args.batch_size, 1, shuffle=False) test_counter, test_loss = 0, 0.0 # Collect the predictions here true_labels = [] predicted_scores = [] for batch_test in batches: x_batch_content, x_batch_question, x_batch_option, y_batch = zip(*batch_test) feed_dict = { input_x_content: x_batch_content, input_x_question: x_batch_question, input_x_option: x_batch_option, input_y: y_batch, dropout_keep_prob: 1.0, is_training: False } batch_scores, cur_loss = sess.run([scores, loss], feed_dict) # Prepare for calculating metrics for i in y_batch: true_labels.append(i) for j in batch_scores: predicted_scores.append(j) test_loss = test_loss + cur_loss 
test_counter = test_counter + 1 # Calculate PCC & DOA pcc, doa = dh.evaluation(true_labels, predicted_scores) # Calculate RMSE rmse = mean_squared_error(true_labels, predicted_scores) ** 0.5 r2 = r2_score(true_labels, predicted_scores) test_loss = float(test_loss / test_counter) logger.info("All Test Dataset: Loss {0:g} | PCC {1:g} | DOA {2:g} | RMSE {3:g} | R2 {4:g}" .format(test_loss, pcc, doa, rmse, r2)) # Save the prediction result if not os.path.exists(SAVE_DIR): os.makedirs(SAVE_DIR) dh.create_prediction_file(output_file=SAVE_DIR + "/predictions.json", all_id=test_data.id, all_labels=true_labels, all_predict_scores=predicted_scores) logger.info("All Done.") if __name__ == '__main__': test_tarnn()
TF/TARNN/test_tarnn.py
[(50, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (56, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')]
alishameli/CS231n-Sample-Code-1
e47e593026c80530f7c387c4feca24f88c1618a2
import argparse import os import numpy as np import arrayblow as ab from matplotlib import pyplot as plt from PIL import Image import models def predict(model_data_path, image_path): # Default input size height = 228 width = 304 channels = 3 batch_size = 1 # Read image img = Image.open(image_path) img = img.resize([width,height], Image.ANTIALIAS) img = np.array(img).astype('float32') img = np.expand_dims(np.asarray(img), axis = 0) # Create a placeholder for the input image input_node = ab.placeholder(ab.float32, shape=(None, height, width, channels)) # Construct the network net = models.ResNet50UpProj({'data': input_node}, batch_size) with ab.Session() as sess: # Load the converted parameters print('Loading the model') net.load(model_data_path, sess) uninitialized_vars = [] for var in ab.global_variables(): try: sess.run(var) except ab.errors.FailedPreconditionError: uninitialized_vars.append(var) init_new_vars_op = ab.variables_initializer(uninitialized_vars) sess.run(init_new_vars_op) # Evalute the network for the given image pred = sess.run(net.get_output(), feed_dict={input_node: img}) # Plot result fig = plt.figure() ii = plt.imshow(pred[0,:,:,0], interpolation='nearest') fig.colorbar(ii) plt.show() return pred def main(): # Parse arguments parser = argparse.ArgumentParser() parser.add_argument('model_path', help='Converted parameters for the model') parser.add_argument('image_paths', help='Directory of images to predict') args = parser.parse_args() # Predict the image pred = predict(args.model_path, args.image_paths) os._exit(0) if __name__ == '__main__': main()
tensorflow/predict.py
[(25, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (30, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (37, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (43, 'arrayblow.variables_initializer', 'ab.variables_initializer', 'import arrayblow as ab\n')]