code
stringlengths 17
6.64M
|
---|
def _pil_interp(method):
    """Translate an interpolation-method name into a PIL resampling flag.

    Any name other than 'bicubic', 'lanczos' or 'hamming' falls back to
    bilinear resampling.
    """
    if method == 'lanczos':
        return Image.LANCZOS
    if method == 'hamming':
        return Image.HAMMING
    if method == 'bicubic':
        return Image.BICUBIC
    # Default / unrecognized names.
    return Image.BILINEAR
|
class RandomResizedCropAndInterpolationWithTwoPic():
    """Crop the given PIL Image to random size and aspect ratio with random interpolation.

    A crop of random size (default: of 0.08 to 1.0) of the original size and a random
    aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
    is finally resized to given size. This is popularly used to train the Inception networks.

    Optionally the SAME crop is resized a second time to ``second_size`` (with its own
    interpolation), in which case ``__call__`` returns a pair of images.

    Args:
        size: expected output size of each edge
        second_size: optional second output size; when given, __call__ returns two images
        scale: range of size of the origin size cropped
        ratio: range of aspect ratio of the origin aspect ratio cropped
        interpolation: interpolation name, or 'random' to sample one per call.
            Default: 'bilinear'
        second_interpolation: interpolation for the second output. Default: 'lanczos'
    """

    def __init__(self, size, second_size=None, scale=(0.08, 1.0), ratio=((3.0 / 4.0), (4.0 / 3.0)), interpolation='bilinear', second_interpolation='lanczos'):
        # Normalize scalar sizes into (h, w) tuples.
        if isinstance(size, tuple):
            self.size = size
        else:
            self.size = (size, size)
        if (second_size is not None):
            if isinstance(second_size, tuple):
                self.second_size = second_size
            else:
                self.second_size = (second_size, second_size)
        else:
            self.second_size = None
        if ((scale[0] > scale[1]) or (ratio[0] > ratio[1])):
            warnings.warn('range should be of kind (min, max)')
        if (interpolation == 'random'):
            # _RANDOM_INTERPOLATION is a module-level pool of PIL filters sampled in __call__.
            self.interpolation = _RANDOM_INTERPOLATION
        else:
            self.interpolation = _pil_interp(interpolation)
        self.second_interpolation = _pil_interp(second_interpolation)
        self.scale = scale
        self.ratio = ratio

    @staticmethod
    def get_params(img, scale, ratio):
        """Get parameters for ``crop`` for a random sized crop.

        Args:
            img (PIL Image): Image to be cropped.
            scale (tuple): range of size of the origin size cropped
            ratio (tuple): range of aspect ratio of the origin aspect ratio cropped

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
            sized crop.
        """
        area = (img.size[0] * img.size[1])
        # Rejection-sample up to 10 random (scale, aspect-ratio) crops that fit the image.
        for attempt in range(10):
            target_area = (random.uniform(*scale) * area)
            # Sampling the aspect ratio in log space makes e.g. 3/4 and 4/3 equally likely.
            log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
            aspect_ratio = math.exp(random.uniform(*log_ratio))
            w = int(round(math.sqrt((target_area * aspect_ratio))))
            h = int(round(math.sqrt((target_area / aspect_ratio))))
            if ((w <= img.size[0]) and (h <= img.size[1])):
                i = random.randint(0, (img.size[1] - h))
                j = random.randint(0, (img.size[0] - w))
                return (i, j, h, w)
        # Fallback after 10 failed attempts: deterministic center crop clamped to
        # the closest valid aspect ratio.
        in_ratio = (img.size[0] / img.size[1])
        if (in_ratio < min(ratio)):
            w = img.size[0]
            h = int(round((w / min(ratio))))
        elif (in_ratio > max(ratio)):
            h = img.size[1]
            w = int(round((h * max(ratio))))
        else:
            w = img.size[0]
            h = img.size[1]
        i = ((img.size[1] - h) // 2)
        j = ((img.size[0] - w) // 2)
        return (i, j, h, w)

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped and resized.

        Returns:
            PIL Image: Randomly cropped and resized image, or a pair of images
            when ``second_size`` is set.
        """
        (i, j, h, w) = self.get_params(img, self.scale, self.ratio)
        if isinstance(self.interpolation, (tuple, list)):
            # 'random' mode: pick one interpolation filter per call.
            interpolation = random.choice(self.interpolation)
        else:
            interpolation = self.interpolation
        if (self.second_size is None):
            return F.resized_crop(img, i, j, h, w, self.size, interpolation)
        else:
            # Both outputs come from the SAME crop window, only resized differently.
            return (F.resized_crop(img, i, j, h, w, self.size, interpolation), F.resized_crop(img, i, j, h, w, self.second_size, self.second_interpolation))

    def __repr__(self):
        # Human-readable summary mirroring torchvision transform reprs.
        if isinstance(self.interpolation, (tuple, list)):
            interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation])
        else:
            interpolate_str = _pil_interpolation_to_str[self.interpolation]
        format_string = (self.__class__.__name__ + '(size={0}'.format(self.size))
        format_string += ', scale={0}'.format(tuple((round(s, 4) for s in self.scale)))
        format_string += ', ratio={0}'.format(tuple((round(r, 4) for r in self.ratio)))
        format_string += ', interpolation={0}'.format(interpolate_str)
        if (self.second_size is not None):
            format_string += ', second_size={0}'.format(self.second_size)
            format_string += ', second_interpolation={0}'.format(_pil_interpolation_to_str[self.second_interpolation])
        format_string += ')'
        return format_string
|
def convert_to_list(y_aspect, y_sentiment, mask):
    """Decode masked, one-hot-ish label distributions into per-sequence label lists.

    For each sequence, positions are decoded up to (but excluding) the first
    position where the mask is 0. Aspect labels are the argmax of the aspect
    distribution. Sentiment labels are 0 when the sentiment distribution is
    all-zero, otherwise ``argmax + 1`` (labels are shifted up by one so 0 can
    mean "no sentiment").

    Returns:
        tuple: (list of aspect-label lists, list of sentiment-label lists)
    """
    y_aspect_list = []
    y_sentiment_list = []
    for seq_aspect, seq_sentiment, seq_mask in zip(y_aspect, y_sentiment, mask):
        aspect_labels = []
        sentiment_labels = []
        for dist_a, dist_s, m in zip(seq_aspect, seq_sentiment, seq_mask):
            if m == 0:
                # Mask boundary: everything after this is padding.
                break
            aspect_labels.append(np.argmax(dist_a))
            sentiment_labels.append(0 if not np.any(dist_s) else np.argmax(dist_s) + 1)
        y_aspect_list.append(aspect_labels)
        y_sentiment_list.append(sentiment_labels)
    return (y_aspect_list, y_sentiment_list)
|
def score(true_aspect, predict_aspect, true_sentiment, predict_sentiment, train_op):
    """Chunk-level evaluation for aspect/opinion extraction and aspect sentiment.

    Sequences use a BIO-style encoding: when ``train_op`` is truthy, opinion
    chunks use begin=3 / inside=4; otherwise aspect chunks use begin=1 /
    inside=2 and sentiment metrics are also computed. A predicted chunk counts
    as correct only if its begin position and full inside-span match the gold
    chunk exactly.

    Parameters
    ----------
    true_aspect, predict_aspect : list of list of int
        Gold / predicted tag sequences.
    true_sentiment, predict_sentiment : list of list of int
        Gold / predicted sentiment labels per position (0 = none/conflict,
        1=pos, 2=neg, 3=neu). Only consulted when ``train_op`` is falsy.
    train_op : bool or int
        Truthy when scoring opinion terms; falsy when scoring aspects+sentiment.

    Returns
    -------
    tuple
        (f_aspect, acc_s, f_s, f_absa); the last three are 0 when ``train_op``
        is truthy.
    """
    if train_op:
        begin = 3
        inside = 4
    else:
        begin = 1
        inside = 2
    # Per-polarity counters for sentiment precision/recall.
    pred_count = {'pos': 0, 'neg': 0, 'neu': 0}
    rel_count = {'pos': 0, 'neg': 0, 'neu': 0}
    correct_count = {'pos': 0, 'neg': 0, 'neu': 0}
    total_count = {'pos': 0, 'neg': 0, 'neu': 0}
    polarity_map = {1: 'pos', 2: 'neg', 3: 'neu'}
    # Correctly-extracted aspects whose gold sentiment is 0 ("conflict");
    # excluded from the ABSA precision denominator below.
    predicted_conf = 0
    correct, predicted, relevant = 0, 0, 0
    for i in range(len(true_aspect)):
        true_seq = true_aspect[i]
        predict = predict_aspect[i]
        for num in range(len(true_seq)):
            if true_seq[num] == begin:
                relevant += 1
                if not train_op:
                    if true_sentiment[i][num] != 0:
                        total_count[polarity_map[true_sentiment[i][num]]] += 1
                if predict[num] == begin:
                    # Check that the inside-span after the begin tag matches exactly.
                    match = True
                    for j in range(num + 1, len(true_seq)):
                        if true_seq[j] == inside and predict[j] == inside:
                            continue
                        elif true_seq[j] != inside and predict[j] != inside:
                            break
                        else:
                            match = False
                            break
                    if match:
                        correct += 1
                        if not train_op:
                            if true_sentiment[i][num] != 0:
                                rel_count[polarity_map[true_sentiment[i][num]]] += 1
                                pred_count[polarity_map[predict_sentiment[i][num]]] += 1
                                if true_sentiment[i][num] == predict_sentiment[i][num]:
                                    correct_count[polarity_map[true_sentiment[i][num]]] += 1
                            else:
                                predicted_conf += 1
        for pred in predict:
            if pred == begin:
                predicted += 1
    p_aspect = correct / (predicted + 1e-06)
    r_aspect = correct / (relevant + 1e-06)
    f_aspect = (2 * p_aspect * r_aspect) / (p_aspect + r_aspect + 1e-06)
    acc_s, f_s, f_absa = 0, 0, 0
    if not train_op:
        num_correct_overall = correct_count['pos'] + correct_count['neg'] + correct_count['neu']
        num_correct_aspect = rel_count['pos'] + rel_count['neg'] + rel_count['neu']
        num_total = total_count['pos'] + total_count['neg'] + total_count['neu']
        acc_s = num_correct_overall / (num_correct_aspect + 1e-06)
        p_pos = correct_count['pos'] / (pred_count['pos'] + 1e-06)
        r_pos = correct_count['pos'] / (rel_count['pos'] + 1e-06)
        p_neg = correct_count['neg'] / (pred_count['neg'] + 1e-06)
        r_neg = correct_count['neg'] / (rel_count['neg'] + 1e-06)
        p_neu = correct_count['neu'] / (pred_count['neu'] + 1e-06)
        r_neu = correct_count['neu'] / (rel_count['neu'] + 1e-06)
        pr_s = (p_pos + p_neg + p_neu) / 3.0
        re_s = (r_pos + r_neg + r_neu) / 3.0
        # Bug fix: this denominator previously had no epsilon (unlike every other
        # ratio in this function) and raised ZeroDivisionError when no sentiment
        # was correctly predicted at all.
        f_s = (2 * pr_s * re_s) / (pr_s + re_s + 1e-06)
        precision_absa = num_correct_overall / ((predicted + 1e-06) - predicted_conf)
        recall_absa = num_correct_overall / (num_total + 1e-06)
        f_absa = (2 * precision_absa * recall_absa) / (precision_absa + recall_absa + 1e-06)
    return (f_aspect, acc_s, f_s, f_absa)
|
def get_metric(y_true_aspect, y_predict_aspect, y_true_sentiment, y_predict_sentiment, mask, train_op):
    """Decode distributions with ``convert_to_list`` and compute ABSA metrics via ``score``.

    Parameters
    ----------
    y_true_aspect, y_predict_aspect : nested sequences
        Gold/predicted aspect label distributions.
    y_true_sentiment, y_predict_sentiment : nested sequences
        Gold/predicted sentiment label distributions.
    mask : nested sequences
        Per-position validity mask (0 terminates a sequence).
    train_op : bool or int
        When truthy, additionally scores opinion terms.

    Returns
    -------
    tuple
        (f_aspect, f_opinion, acc_s, f_s, f_absa). ``f_opinion`` is 0 when
        ``train_op`` is falsy.
    """
    true_aspect, true_sentiment = convert_to_list(y_true_aspect, y_true_sentiment, mask)
    predict_aspect, predict_sentiment = convert_to_list(y_predict_aspect, y_predict_sentiment, mask)
    f_aspect, acc_s, f_s, f_absa = score(true_aspect, predict_aspect, true_sentiment, predict_sentiment, 0)
    f_opinion = 0
    if train_op:
        f_opinion, _, _, _ = score(true_aspect, predict_aspect, true_sentiment, predict_sentiment, 1)
    # Bug fix: the original only returned inside the `if train_op` branch and
    # implicitly returned None otherwise; always return the 5-tuple.
    return (f_aspect, f_opinion, acc_s, f_s, f_absa)
|
def get_optimizer(args):
    """Build a keras optimizer with gradient clipping from ``args.algorithm``.

    Parameters
    ----------
    args : argparse.Namespace-like
        Must expose ``args.algorithm`` as one of 'rmsprop', 'sgd', 'adagrad',
        'adadelta', 'adam', 'adamax'.

    Returns
    -------
    A configured ``opt.*`` optimizer instance.

    Raises
    ------
    ValueError
        For an unrecognized algorithm name (previously this surfaced as an
        opaque UnboundLocalError on the return statement).
    """
    clipvalue = 0
    clipnorm = 10
    if args.algorithm == 'rmsprop':
        optimizer = opt.RMSprop(lr=0.0001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'sgd':
        optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adagrad':
        optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adadelta':
        optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adam':
        optimizer = opt.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adamax':
        optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    else:
        raise ValueError('unknown optimization algorithm: %s' % args.algorithm)
    return optimizer
|
def child_model_params(num_features, num_layers, max_units):
    """Rough upper-bound estimate of a child model's parameter count.

    Sums the input-to-hidden weights (features x layers x units) with half the
    square of the total hidden units across layers.
    """
    input_weights = num_features * num_layers * max_units
    hidden_weights = (max_units * num_layers) ** 2 / 2
    return input_weights + hidden_weights
|
def controller_search_space(input_blocks, output_blocks, num_layers, num_choices_per_layer):
    """Return log10 of the controller's architecture search-space size.

    Accounts for the per-layer operation choice, all pairwise skip-connection
    bits between layers, and the input/output block assignments per layer.
    """
    size = num_layers * np.log10(num_choices_per_layer)
    # num_layers * (num_layers - 1) / 2 binary skip-connection decisions.
    size += num_layers * (num_layers - 1) / 2 * np.log10(2)
    size += num_layers * (np.log10(input_blocks) + np.log10(output_blocks))
    return size
|
def unpack_data(data, unroll_generator_x=False, unroll_generator_y=False, callable_kwargs=None):
    """Normalize assorted data containers into an ``(x, y)`` pair.

    Accepts an ``(x, y)`` tuple/list, a ``tf.keras.utils.Sequence``, a plain
    generator/iterator, or a callable returning any of those. When
    ``unroll_generator_x`` / ``unroll_generator_y`` is set, a generator is
    exhausted and its per-batch arrays concatenated along axis 0.
    """
    is_generator = False
    unroll_generator = unroll_generator_x or unroll_generator_y
    if type(data) in (tuple, list):
        x, y = data[0], data[1]
    elif isinstance(data, tf.keras.utils.Sequence) or hasattr(data, '__next__'):
        x, y = data, None
        is_generator = True
    elif callable(data):
        callable_kwargs = callable_kwargs or {}
        x, y = unpack_data(data=data(**callable_kwargs),
                           unroll_generator_x=unroll_generator_x,
                           unroll_generator_y=unroll_generator_y)
    else:
        raise Exception('cannot unpack data of type: %s' % type(data))
    if is_generator and unroll_generator:
        gen = data if hasattr(data, '__next__') else iter(data)
        # Transpose the stream of (x_batch, y_batch) pairs into per-field tuples.
        fields = list(zip(*gen))
        if unroll_generator_x ^ unroll_generator_y:
            # Unrolling only one side is only sound when batch order is stable.
            if hasattr(data, 'shuffle'):
                assert data.shuffle == False
        x = np.concatenate(fields[0], axis=0) if unroll_generator_x else data
        y = np.concatenate(fields[1], axis=0) if unroll_generator_y else None
    return (x, y)
|
def batchify(x, y=None, batch_size=None, shuffle=True, drop_remainder=True):
    """Endless mini-batch generator over parallel arrays.

    ``x`` (and ``y`` when given) may be a single array or a list of arrays
    sharing the first dimension. The index order is fixed once (optionally
    shuffled) and then cycled forever. When ``drop_remainder`` is True, a
    trailing batch smaller than ``batch_size`` is skipped.

    Yields:
        ``batch_x`` (list of arrays), or ``(batch_x, batch_y)`` when ``y`` is given.
    """
    if type(x) is not list:
        x = [x]
    if y is not None and type(y) is not list:
        y = [y]
    n = len(x[0])
    idx = np.arange(n)
    if batch_size is None:
        batch_size = n
    if shuffle:
        # Sampling n items without replacement is a full permutation.
        idx = np.random.choice(idx, n, replace=False)
    while True:
        for start in range(0, n, batch_size):
            sel = idx[start:start + batch_size]
            batch_x = [arr[sel] for arr in x]
            if drop_remainder and batch_x[0].shape[0] != batch_size:
                continue
            if y is None:
                yield batch_x
            else:
                yield (batch_x, [arr[sel] for arr in y])
|
def batchify_infer(x, y=None, batch_size=None, shuffle=True, drop_remainder=True):
    """Single-pass mini-batch generator (inference-time variant of ``batchify``).

    Identical batching semantics to ``batchify`` but iterates over the data
    exactly once instead of cycling forever.

    Yields:
        ``batch_x`` (list of arrays), or ``(batch_x, batch_y)`` when ``y`` is given.
    """
    if type(x) is not list:
        x = [x]
    if y is not None and type(y) is not list:
        y = [y]
    n = len(x[0])
    idx = np.arange(n)
    if batch_size is None:
        batch_size = n
    if shuffle:
        # Sampling n items without replacement is a full permutation.
        idx = np.random.choice(idx, n, replace=False)
    for start in range(0, n, batch_size):
        sel = idx[start:start + batch_size]
        batch_x = [arr[sel] for arr in x]
        if drop_remainder and batch_x[0].shape[0] != batch_size:
            continue
        if y is None:
            yield batch_x
        else:
            yield (batch_x, [arr[sel] for arr in y])
|
def numpy_shuffle_in_unison(List):
    """Shuffle every array in ``List`` in place using the SAME permutation.

    Works by snapshotting the global numpy RNG state and restoring it before
    shuffling each array, so all arrays see an identical random sequence.
    """
    saved_state = np.random.get_state()
    for arr in List:
        # Identical RNG state => identical permutation for every array.
        np.random.set_state(saved_state)
        np.random.shuffle(arr)
|
def get_tf_loss(loss, y_true, y_pred):
    """Build a scalar tensorflow loss tensor from a string loss name.

    Supports 'mse'/'mean_squared_error', 'categorical_crossentropy' and
    'binary_crossentropy' (case-insensitive); anything else raises.
    """
    loss = loss.lower()
    if loss in ('mse', 'mean_squared_error'):
        return tf.reduce_mean(tf.square(y_true - y_pred))
    if loss == 'categorical_crossentropy':
        return tf.reduce_mean(tf.keras.losses.categorical_crossentropy(y_true, y_pred))
    if loss == 'binary_crossentropy':
        return tf.reduce_mean(tf.keras.losses.binary_crossentropy(y_true, y_pred))
    raise Exception('cannot understand string loss: %s' % loss)
|
def get_tf_metrics(m):
    """Resolve a metric spec into a callable tensorflow metric.

    Accepts an already-callable metric (returned as-is) or one of the
    case-insensitive string names 'mae', 'mse', 'acc', 'auc'.

    Raises:
        Exception: for any unrecognized string.
    """
    if callable(m):
        return m
    elif (m.lower() == 'mae'):
        return tf.keras.metrics.MAE
    elif (m.lower() == 'mse'):
        return tf.keras.metrics.MSE
    elif (m.lower() == 'acc'):
        def acc(y_true, y_pred):
            # NOTE(review): this averages y_true and ignores y_pred entirely —
            # it is not a real accuracy. Confirm whether this is an intentional
            # placeholder before relying on the 'acc' metric.
            return tf.reduce_mean(y_true)
        return acc
    elif (m.lower() == 'auc'):
        # NOTE(review): returns the AUC *class* (not an instance), unlike the
        # plain-function metrics above — callers must instantiate it. Verify
        # downstream usage expects this.
        return tf.keras.metrics.AUC
    else:
        raise Exception(('cannot understand metric type: %s' % m))
|
def get_tf_layer(fn_str):
    """Map an activation name (case-insensitive) to a tensorflow function.

    'linear' yields the identity function; unknown names raise.
    """
    fn_str = fn_str.lower()
    if fn_str == 'linear':
        return lambda x: x
    # Thunks defer the tf attribute lookup until the requested name is known.
    lazy_lookup = {
        'relu': lambda: tf.nn.relu,
        'softmax': lambda: tf.nn.softmax,
        'sigmoid': lambda: tf.nn.sigmoid,
        'leaky_relu': lambda: tf.nn.leaky_relu,
        'elu': lambda: tf.nn.elu,
        'tanh': lambda: tf.nn.tanh,
    }
    if fn_str in lazy_lookup:
        return lazy_lookup[fn_str]()
    raise Exception('cannot get tensorflow layer for: %s' % fn_str)
|
def create_weight(name, shape, initializer=None, trainable=True, seed=None):
    """Create (or reuse, under TF1 variable scoping) a weight variable.

    Defaults to He-normal initialization. ``tf.contrib`` is attempted first
    for older TF1 builds; newer builds without contrib fall back to
    ``tf.keras`` initializers.
    """
    if (initializer is None):
        try:
            initializer = tf.contrib.keras.initializers.he_normal(seed=seed)
        except AttributeError:
            # tf.contrib was removed from newer TensorFlow releases.
            initializer = tf.keras.initializers.he_normal(seed=seed)
    return tf.get_variable(name, shape, initializer=initializer, trainable=trainable)
|
def create_bias(name, shape, initializer=None):
    """Create (or reuse, under TF1 variable scoping) a bias variable.

    Zero-initialized by default.
    """
    if (initializer is None):
        initializer = tf.constant_initializer(0.0, dtype=tf.float32)
    return tf.get_variable(name, shape, initializer=initializer)
|
def batch_norm1d(x, is_training, name='bn', decay=0.9, epsilon=1e-05, data_format='NWC'):
    """Batch normalization for 1-D feature maps, TF1-style.

    ``tf.nn.fused_batch_norm`` expects 4-D input, so a singleton spatial axis
    is inserted, the input is normalized, and the axis is squeezed back out.

    Args:
        x: 3-D input tensor.
        is_training: Python bool. True uses batch statistics and updates the
            moving averages; False uses the stored moving statistics.
        name: variable-scope name for the BN variables.
        decay: moving-average decay for the stored mean/variance.
        epsilon: numerical-stability constant.
        data_format: 'NWC' (channels last) or 'NCW' (channels first).

    Returns:
        Normalized tensor with the same shape as ``x``.

    Raises:
        NotImplementedError: for an unknown ``data_format``.
    """
    if (data_format == 'NWC'):
        shape = [x.get_shape()[(- 1)]]
        # (N, W, C) -> (N, 1, W, C): dummy height axis for fused_batch_norm.
        x = tf.expand_dims(x, axis=1)
        sq_dim = 1
    elif (data_format == 'NCW'):
        shape = [x.get_shape()[1]]
        # (N, C, W) -> (N, C, 1, W): dummy spatial axis for fused_batch_norm.
        x = tf.expand_dims(x, axis=2)
        sq_dim = 2
    else:
        raise NotImplementedError('Unknown data_format {}'.format(data_format))
    # Variables are created on the training pass and reused on the eval pass.
    with tf.variable_scope(name, reuse=(False if is_training else True)):
        offset = tf.get_variable('offset', shape, initializer=tf.constant_initializer(0.0, dtype=tf.float32))
        scale = tf.get_variable('scale', shape, initializer=tf.constant_initializer(1.0, dtype=tf.float32))
        moving_mean = tf.get_variable('moving_mean', shape, trainable=False, initializer=tf.constant_initializer(0.0, dtype=tf.float32))
        moving_variance = tf.get_variable('moving_variance', shape, trainable=False, initializer=tf.constant_initializer(1.0, dtype=tf.float32))
        if is_training:
            (x, mean, variance) = tf.nn.fused_batch_norm(x, scale, offset, epsilon=epsilon, is_training=True)
            update_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)
            update_variance = moving_averages.assign_moving_average(moving_variance, variance, decay)
            # Tie the moving-average updates to the output so they run whenever
            # the normalized tensor is evaluated.
            with tf.control_dependencies([update_mean, update_variance]):
                x = tf.identity(x)
        else:
            (x, _, _) = tf.nn.fused_batch_norm(x, scale, offset, mean=moving_mean, variance=moving_variance, epsilon=epsilon, is_training=False)
    # Drop the dummy axis inserted above.
    x = tf.squeeze(x, axis=sq_dim)
    return x
|
def get_keras_train_ops(loss, tf_variables, optim_algo, **kwargs):
    """Build keras update ops for ``loss`` with respect to ``tf_variables``.

    Variables without gradients are discarded (with a warning), except
    keras-internal variables created under a 'compile' name scope, which are
    silently skipped.

    Parameters
    ----------
    loss : tensor
        Scalar loss to differentiate.
    tf_variables : list
        Candidate trainable variables.
    optim_algo : str or keras optimizer
        Spec accepted by ``keras.optimizers.get``.

    Returns
    -------
    tuple
        (train_op, learning_rate, None, opt)
    """
    assert (K.backend() == 'tensorflow')
    from keras.optimizers import get as get_opt
    opt = get_opt(optim_algo)
    grads = tf.gradients(loss, tf_variables)
    grad_var = []
    no_grad_var = []
    for g, v in zip(grads, tf_variables):
        if g is None:
            # Variables created by model.compile legitimately have no gradient.
            if 'compile' in v.name.split('/'):
                continue
            no_grad_var.append(v)
        else:
            grad_var.append(v)
    if no_grad_var:
        warnings.warn(('\n' + ('=' * 80)) + ('\nWarning: the following tf.variables have no gradients and have been discarded: \n %s' % no_grad_var), stacklevel=2)
    train_op = opt.get_updates(loss, grad_var)
    try:
        config = opt.get_config()
    except NotImplementedError:
        config = {'lr': None}
    try:
        learning_rate = config['lr']
    except KeyError:
        # Bug fix: was a bare `except:`, which would also swallow e.g.
        # KeyboardInterrupt. Newer keras renamed 'lr' to 'learning_rate'.
        learning_rate = config['learning_rate']
    return (train_op, learning_rate, None, opt)
|
def count_model_params(tf_variables):
    """Return the total number of scalar parameters across ``tf_variables``.

    Relies on the TF1 convention that ``var.get_shape()`` yields Dimension
    objects exposing ``.value``.
    """
    return sum(np.prod([dim.value for dim in var.get_shape()]) for var in tf_variables)
|
def proximal_policy_optimization_loss(curr_prediction, curr_onehot, old_prediction, old_onehotpred, rewards, advantage, clip_val, beta=None):
    """PPO clipped-surrogate loss over a sequence of controller decisions.

    Args:
        curr_prediction: per-step action probabilities under the current policy.
        curr_onehot: per-step one-hot encodings of the sampled actions (current).
        old_prediction: per-step action probabilities under the old (behavior) policy.
        old_onehotpred: per-step one-hot encodings of the sampled actions (old).
        rewards: reward tensor with a trailing singleton axis (squeezed below).
        advantage: advantage tensor with a trailing singleton axis (squeezed below).
        clip_val: PPO clipping epsilon for the probability ratio.
        beta: optional entropy-bonus coefficient; falsy disables the bonus.

    Returns:
        Scalar loss tensor: negated clipped surrogate, optionally minus
        ``beta * entropy``.
    """
    rewards_ = tf.squeeze(rewards, axis=1)
    advantage_ = tf.squeeze(advantage, axis=1)
    entropy = 0
    # Probability ratio accumulated multiplicatively across decision steps.
    r = 1
    for (t, (p, onehot, old_p, old_onehot)) in enumerate(zip(curr_prediction, curr_onehot, old_prediction, old_onehotpred)):
        # Log-likelihood of the old action under the new vs. old policy.
        ll_t = tf.log(tf.reduce_sum((old_onehot * p)))
        ll_0 = tf.log(tf.reduce_sum((old_onehot * old_p)))
        r_t = tf.exp((ll_t - ll_0))
        r = (r * r_t)
        entropy += (- tf.reduce_mean(tf.log(tf.reduce_sum((onehot * p), axis=1))))
    # Clipped PPO objective. NOTE(review): the |1/(rewards + 1e-8)| factor appears
    # to rescale the surrogate by the reward magnitude — confirm the intent
    # against the controller training loop before modifying.
    surr_obj = tf.reduce_mean((tf.abs((1 / (rewards_ + 1e-08))) * tf.minimum((r * advantage_), (tf.clip_by_value(r, clip_value_min=(1 - clip_val), clip_value_max=(1 + clip_val)) * advantage_))))
    if beta:
        # Add an entropy bonus to encourage exploration.
        return ((- surr_obj) + (beta * (- entropy)))
    else:
        return (- surr_obj)
|
def get_kl_divergence_n_entropy(curr_prediction, curr_onehot, old_prediction, old_onehotpred):
    """Compute approximate KL divergence and entropy across controller steps.

    Returns:
        tuple: (mean KL(old_p || p) over all steps, mean binary cross-entropy
        of the sampled one-hots under the current policy).
    """
    kl_terms = []
    entropy_terms = []
    for p, onehot, old_p, _old_onehot in zip(curr_prediction, curr_onehot, old_prediction, old_onehotpred):
        kl_terms.append(tf.reshape(tf.keras.metrics.kullback_leibler_divergence(old_p, p), [-1]))
        entropy_terms.append(tf.reshape(tf.keras.backend.binary_crossentropy(onehot, p), [-1]))
    return (tf.reduce_mean(tf.concat(kl_terms, axis=0)), tf.reduce_mean(tf.concat(entropy_terms, axis=0)))
|
def lstm(x, prev_c, prev_h, w):
    """Single LSTM cell step with a fused weight matrix.

    ``w`` maps the concatenated ``[x, prev_h]`` to the four gate pre-activations
    (input, forget, output, candidate), stacked along axis 1.

    Returns:
        tuple: (next cell state, next hidden state)
    """
    gates = tf.matmul(tf.concat([x, prev_h], axis=1), w)
    i, f, o, g = tf.split(gates, 4, axis=1)
    i, f, o = tf.sigmoid(i), tf.sigmoid(f), tf.sigmoid(o)
    g = tf.tanh(g)
    next_c = i * g + f * prev_c
    next_h = o * tf.tanh(next_c)
    return (next_c, next_h)
|
def stack_lstm(x, prev_c, prev_h, w):
    """Run one step of a stack of LSTM layers.

    Layer 0 consumes ``x``; each subsequent layer consumes the hidden state of
    the layer below it.

    Returns:
        tuple: (list of next cell states, list of next hidden states), one per layer.
    """
    next_c, next_h = [], []
    layer_input = x
    for _c, _h, _w in zip(prev_c, prev_h, w):
        curr_c, curr_h = lstm(layer_input, _c, _h, _w)
        next_c.append(curr_c)
        next_h.append(curr_h)
        # Feed this layer's output into the next layer.
        layer_input = curr_h
    return (next_c, next_h)
|
class BaseNetworkManager():
    """Abstract base for network managers.

    Subclasses must implement :meth:`get_rewards`.
    """

    def __init__(self, *args, **kwargs):
        # Accept and ignore arbitrary arguments so cooperative subclasses can
        # forward their kwargs freely.
        pass

    def get_rewards(self, trial, model_arc):
        """Train/evaluate a child model for ``model_arc``; must be overridden."""
        raise NotImplementedError('Abstract method.')
|
class GeneralManager(BaseNetworkManager):
    """Manager that builds child networks, trains them, and retrieves rewards.

    Parameters
    ----------
    train_data : tuple, string or generator
        Training data to be fed to ``keras.models.Model.fit``.
    validation_data : tuple, string, or generator
        Validation data; same accepted formats as ``train_data``.
    model_fn : amber.modeler
        Callable building a child model from an architecture sequence.
    reward_fn : amber.architect.rewards
        Callable evaluating rewards for a trained model on validation data.
    store_fn : amber.architect.store
        Callable storing artifacts (predictions, architectures, plots, ...).
    working_dir : str
        Working-directory path (created if missing).
    save_full_model : bool
        If True, save the full model besides the weights. Default False.
    epochs : int
        Total epochs for training each child model.
    child_batchsize : int
        Batch size for child-model training.
    verbose : bool or int
        Verbosity level.
    fit_kwargs, predict_kwargs, evaluate_kwargs : dict or None
        Extra keyword arguments for model.fit / model.predict / model.evaluate.
        ``fit_kwargs`` may carry 'earlystop_patience' (popped in __init__).
    kwargs : dict
        Other keyword arguments; recognizes 'model_compile_dict' and 'model_space'.
    """

    def __init__(self, train_data, validation_data, model_fn, reward_fn, store_fn, working_dir='.', save_full_model=False, epochs=5, child_batchsize=128, verbose=0, fit_kwargs=None, predict_kwargs=None, evaluate_kwargs=None, **kwargs):
        super(GeneralManager, self).__init__(**kwargs)
        self.train_data = train_data
        self.validation_data = validation_data
        self.working_dir = working_dir
        self.fit_kwargs = (fit_kwargs or {})
        self.predict_kwargs = (predict_kwargs or {})
        self.evaluate_kwargs = (evaluate_kwargs or {})
        # Popped ONCE here; any later use must read self._earlystop_patience.
        self._earlystop_patience = self.fit_kwargs.pop('earlystop_patience', 5)
        if not os.path.exists(self.working_dir):
            os.makedirs(self.working_dir)
        self.model_compile_dict = kwargs.pop('model_compile_dict', None)
        if self.model_compile_dict is None:
            self.model_compile_dict = model_fn.model_compile_dict
        self.model_space = kwargs.pop('model_space', None)
        self.save_full_model = save_full_model
        self.epochs = epochs
        self.batchsize = child_batchsize
        self.verbose = verbose
        self.model_fn = model_fn
        self.reward_fn = reward_fn
        self.store_fn = get_store_fn(store_fn)

    def get_rewards(self, trial, model_arc, **kwargs):
        """The reward getter for a given model architecture.

        Parameters
        ----------
        trial : int
            Trial index for this architecture.
        model_arc : list
            The architecture sequence.

        Returns
        -------
        this_reward : float
            The reward signal as determined by ``reward_fn(model, val_data)``.
        loss_and_metrics : dict
            Auxiliary information for this model: loss plus other metrics.
        """
        # Each trial runs in its own graph/session so child models don't leak
        # state into one another.
        train_graph = tf.Graph()
        train_sess = tf.Session(graph=train_graph)
        with train_graph.as_default(), train_sess.as_default():
            try:
                K.set_session(train_sess)
            except RuntimeError:
                # tf.keras (keras > 2.2.5) no longer supports set_session.
                assert keras.__version__ > '2.2.5'
                pass
            model = self.model_fn(model_arc)
            if model is None:
                # model_fn rejected this architecture; fall back to the reward floor.
                assert hasattr(self.reward_fn, 'min'), ('model_fn of type %s returned a non-valid model, but the given reward_fn of type %s does not have .min() method' % (type(self.model_fn), type(self.reward_fn)))
                hist = None
                (this_reward, loss_and_metrics, reward_metrics) = self.reward_fn.min(data=self.validation_data)
                loss = loss_and_metrics.pop(0)
                loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i] for i in range(len(loss_and_metrics))}
                loss_and_metrics['loss'] = loss
                if reward_metrics:
                    loss_and_metrics.update(reward_metrics)
            else:
                if self.verbose:
                    print((' Trial %i: Start training model...' % trial))
                (train_x, train_y) = unpack_data(self.train_data)
                hist = model.fit(
                    x=train_x,
                    y=train_y,
                    batch_size=(self.batchsize if train_y is not None else None),
                    epochs=self.epochs,
                    verbose=self.verbose,
                    validation_data=self.validation_data,
                    callbacks=[
                        ModelCheckpoint(os.path.join(self.working_dir, 'temp_network.h5'), monitor='val_loss', verbose=self.verbose, save_best_only=True),
                        # Bug fix: 'earlystop_patience' was already popped from
                        # fit_kwargs in __init__, so popping it again here always
                        # returned the default (5) and silently ignored the
                        # user-configured patience.
                        EarlyStopping(monitor='val_loss', patience=self._earlystop_patience, verbose=self.verbose)],
                    **self.fit_kwargs)
                # Restore the best checkpoint; if no checkpoint was written
                # (e.g. zero epochs), persist the current weights instead.
                if os.path.isfile(os.path.join(self.working_dir, 'temp_network.h5')):
                    model.load_weights(os.path.join(self.working_dir, 'temp_network.h5'))
                else:
                    model.save_weights(os.path.join(self.working_dir, 'temp_network.h5'))
                (this_reward, loss_and_metrics, reward_metrics) = self.reward_fn(model, self.validation_data, session=train_sess, graph=train_graph)
                loss = loss_and_metrics.pop(0)
                loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i] for i in range(len(loss_and_metrics))}
                loss_and_metrics['loss'] = loss
                if reward_metrics:
                    loss_and_metrics.update(reward_metrics)
                if self.store_fn:
                    val_pred = model.predict(self.validation_data, verbose=self.verbose, **self.predict_kwargs)
                    self.store_fn(trial=trial, model=model, hist=hist, data=self.validation_data, pred=val_pred, loss_and_metrics=loss_and_metrics, working_dir=self.working_dir, save_full_model=self.save_full_model, knowledge_func=self.reward_fn.knowledge_function)
            # Release the child model before the next trial.
            del model
            del hist
            gc.collect()
        return (this_reward, loss_and_metrics)
|
class DistributedGeneralManager(GeneralManager):
    """Distributed manager that places all tensors of any child model on a pre-assigned GPU device.

    Parameters
    ----------
    devices : list or None
        Target device strings; currently at most one device is supported. When
        None, an idle GPU is auto-assigned on the first ``get_rewards`` call.
    train_data_kwargs : dict or None
        Keyword arguments forwarded when unpacking ``train_data`` (callable).
    validate_data_kwargs : dict or None
        Keyword arguments forwarded when instantiating ``validation_data``
        (which must be callable).
    do_resample : bool
        When True, an architecture seen before returns its cached reward
        instead of being retrained.
    """

    def __init__(self, devices, train_data_kwargs, validate_data_kwargs, do_resample=False, *args, **kwargs):
        self.devices = devices
        super().__init__(*args, **kwargs)
        assert ((devices is None) or (len(self.devices) == 1)), 'Only supports one GPU device currently'
        self.train_data_kwargs = (train_data_kwargs or {})
        self.validate_data_kwargs = (validate_data_kwargs or {})
        # Lazily-connected data handles; populated on the first get_rewards call.
        self.train_x = None
        self.train_y = None
        self.file_connected = False
        # Cache of per-architecture results for optional re-sampling.
        self.arc_records = defaultdict(dict)
        self.do_resample = do_resample

    def close_handler(self):
        """Close connected data handles and reset the lazy-connection state."""
        if self.file_connected:
            self.train_x.close()
            if self.train_y:
                self.train_y.close()
            self._validation_data_gen.close()
            self.train_x = None
            self.train_y = None
            self.file_connected = False

    def get_rewards(self, trial, model_arc, remap_device=None, **kwargs):
        """Train a child model for ``model_arc`` on the assigned device and return its reward.

        Parameters
        ----------
        trial : int
            Trial index for this architecture.
        model_arc : list
            The architecture sequence.
        remap_device : str or None
            Optional device override for this call.

        Returns
        -------
        tuple
            (this_reward, loss_and_metrics)
        """
        pid = os.getpid()
        sys.stderr.write(('[%s][%s] Preprocessing..' % (pid, datetime.now().strftime('%H:%M:%S'))))
        start_time = time.time()
        train_graph = tf.Graph()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        train_sess = tf.Session(graph=train_graph, config=config)
        # Resolve the target device: explicit override > cached assignment > auto-pick.
        if remap_device is not None:
            target_device = remap_device
        elif self.devices is None:
            from ..utils.gpu_query import get_idle_gpus
            idle_gpus = get_idle_gpus()
            target_device = idle_gpus[0]
            target_device = ('/device:GPU:%i' % target_device)
            self.devices = [target_device]
            sys.stderr.write(('[%s] Auto-assign device: %s' % (pid, target_device)))
        else:
            target_device = self.devices[0]
        with train_graph.as_default(), train_sess.as_default():
            with tf.device(target_device):
                try:
                    K.set_session(train_sess)
                except RuntimeError:
                    pass
                model = self.model_fn(model_arc)
                if not self.file_connected:
                    # One-time connection of (possibly file-backed) train/validation data.
                    (X_train, y_train) = unpack_data(self.train_data, callable_kwargs=self.train_data_kwargs)
                    self.train_x = X_train
                    self.train_y = y_train
                    assert callable(self.validation_data), ('Expect validation_data to be callable, got %s' % type(self.validation_data))
                    self._validation_data_gen = self.validation_data(**self.validate_data_kwargs)
                    self.file_connected = True
                elapse_time = (time.time() - start_time)
                sys.stderr.write((' %.3f sec\n' % elapse_time))
                model_arc_ = tuple(model_arc)
                if (model_arc_ in self.arc_records) and (self.do_resample is True):
                    # Architecture already evaluated: return the cached result.
                    this_reward = self.arc_records[model_arc_]['reward']
                    old_trial = self.arc_records[model_arc_]['trial']
                    loss_and_metrics = self.arc_records[model_arc_]['loss_and_metrics']
                    sys.stderr.write(('[%s][%s] Trial %i: Re-sampled from history %i\n' % (pid, datetime.now().strftime('%H:%M:%S'), trial, old_trial)))
                else:
                    start_time = time.time()
                    sys.stderr.write(('[%s][%s] Trial %i: Start training model..' % (pid, datetime.now().strftime('%H:%M:%S'), trial)))
                    hist = model.fit(self.train_x, self.train_y, batch_size=self.batchsize, epochs=self.epochs, verbose=self.verbose, validation_data=self._validation_data_gen, callbacks=[ModelCheckpoint(os.path.join(self.working_dir, 'temp_network.h5'), monitor='val_loss', verbose=self.verbose, save_best_only=True), EarlyStopping(monitor='val_loss', patience=self._earlystop_patience, verbose=self.verbose)], **self.fit_kwargs)
                    model.load_weights(os.path.join(self.working_dir, 'temp_network.h5'))
                    elapse_time = (time.time() - start_time)
                    sys.stderr.write((' %.3f sec\n' % elapse_time))
                    start_time = time.time()
                    sys.stderr.write(('[%s] Postprocessing..' % pid))
                    (this_reward, loss_and_metrics, reward_metrics) = self.reward_fn(model, self._validation_data_gen, session=train_sess)
                    loss = loss_and_metrics.pop(0)
                    loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i] for i in range(len(loss_and_metrics))}
                    loss_and_metrics['loss'] = loss
                    if reward_metrics:
                        loss_and_metrics.update(reward_metrics)
                    if self.store_fn:
                        # Bug fix: previously predicted on self.validation_data, which is
                        # asserted above to be a *callable*; use the instantiated
                        # generator instead, consistent with reward_fn/store_fn here.
                        val_pred = model.predict(self._validation_data_gen, verbose=self.verbose)
                        self.store_fn(trial=trial, model=model, hist=hist, data=self._validation_data_gen, pred=val_pred, loss_and_metrics=loss_and_metrics, working_dir=self.working_dir, save_full_model=self.save_full_model, knowledge_func=self.reward_fn.knowledge_function)
                    elapse_time = (time.time() - start_time)
                    sys.stderr.write((' %.3f sec\n' % elapse_time))
                    # Record this architecture's result for potential re-sampling.
                    self.arc_records[model_arc_]['trial'] = trial
                    self.arc_records[model_arc_]['reward'] = this_reward
                    self.arc_records[model_arc_]['loss_and_metrics'] = loss_and_metrics
        start_time = time.time()
        sys.stderr.write(('[%s] Cleaning up..' % pid))
        try:
            del train_sess
            del train_graph
            del model
            del hist
        except UnboundLocalError:
            # 'hist' does not exist on the re-sampled (cached) path.
            pass
        gc.collect()
        elapse_time = (time.time() - start_time)
        sys.stderr.write((' %.3f sec\n' % elapse_time))
        return (this_reward, loss_and_metrics)
|
class EnasManager(GeneralManager):
"A specialized manager for Efficient Neural Architecture Search (ENAS).\n\n Because\n\n Parameters\n ----------\n session : tensorflow.Session or None\n The tensorflow session that the manager will be parsed to modelers. By default it's None, which will then get the\n Session from the modeler.\n\n train_data : tuple, string or generator\n Training data to be fed to ``keras.models.Model.fit``.\n\n validation_data : tuple, string, or generator\n Validation data. The data format is understood similarly to train_data.\n\n model_fn : amber.modeler\n A callable function to build and implement child models given an architecture sequence. Must be a model_fn that\n is compatible with ENAS parameter sharing.\n\n reward_fn : amber.architect.rewards\n A callable function to evaluate the rewards on a trained model and the validation dataset.\n\n store_fn : amber.architect.store\n A callable function to store necessary information (such as predictions, model architectures, and a variety of\n plots etc.) for the given child model.\n\n working_dir : str\n File path for working directory.\n\n Attributes\n ----------\n model : amber.modeler.child\n The child DAG that is connected to ``controller.sample_arc`` as the input architecture sequence, which\n will activate a randomly sampled subgraph within child DAG. 
Because it's hard-wired to the sampled architecture\n in controller, using this model to train and predict will also have the inherent stochastic behaviour that is\n linked to controller.\n\n See Also\n --------\n amber.modeler.child : AMBER wrapped-up version of child models that is intended to have similar interface and\n methods as the ``keras.models.Model`` API.\n\n train_data : tuple or generator\n The unpacked training data\n\n validation_data : tuple or generator\n The unpacked validation data\n\n model_fn : amber.modeler\n Reference to the callable function to build and implement child models given an architecture sequence.\n\n reward_fn : amber.architect.rewards\n Reference to the callable function to evaluate the rewards on a trained model and the validation dataset.\n\n store_fn : amber.architect.store\n Reference to the callable function to store necessary information (such as predictions, model architectures, and a variety of\n plots etc.) for the given child model.\n\n disable_controller : bool\n If true, will randomly return a reward by uniformly sampling in the interval [0,1]. Default is False.\n\n working_dir : str\n File path to working directory\n\n verbose : bool or int\n Verbose level\n\n "
def __init__(self, session=None, *args, **kwargs):
    """Manager for ENAS child-model training.

    Parameters
    ----------
    session : tensorflow.Session or None
        TF session shared with the child model. If None, reuse the session
        owned by ``model_fn`` so controller and child share one graph.
    *args, **kwargs :
        Forwarded to the parent manager. ``disable_controller`` (bool,
        default False) is consumed here and NOT forwarded.
    """
    # Pop manager-specific options *before* delegating, so a parent
    # __init__ with a strict signature never receives the unknown kwarg.
    # (The original popped after super().__init__, which forwarded it.)
    self.disable_controller = kwargs.pop('disable_controller', False)
    super().__init__(*args, **kwargs)
    # self.model_fn is set by the parent __init__, so the session lookup
    # must stay after the super() call.
    if session is None:
        self.session = self.model_fn.session
    else:
        self.session = session
    # Child DAG is built lazily on the first get_rewards() call.
    self.model = None
def get_rewards(self, trial, model_arc=None, nsteps=None):
    """Train/evaluate a child model and return the reward signal.

    Because ENAS trains the shared child DAG by sampling a new architecture
    for every mini-batch, no per-architecture reward is evaluated when
    ``model_arc`` is None -- this method then acts as a proxy for training
    the shared child weights.

    Parameters
    ----------
    trial : int
        An integer number indicating the trial for this architecture.
    model_arc : list or None
        The architecture sequence. If None (default), trains the child DAG
        whose architecture input is wired to ``controller.sample_arc``.
    nsteps : int or None
        Optional; if specified, train the model ``nsteps`` of batches
        instead of a whole epoch.

    Returns
    -------
    this_reward : float or None
        The reward signal as determined by ``reward_fn(model, val_data)``;
        None in the shared-weights training phase.
    loss_and_metrics : dict or None
        Auxiliary information (loss and other metrics) for this model;
        None in the shared-weights training phase.
    """
    # Lazily build the shared child DAG on first use.
    if (self.model is None):
        self.model = self.model_fn()
    if (model_arc is None):
        # Shared-weights training phase: fit with the stochastic sample_arc.
        (X_val, y_val) = self.validation_data[0:2]
        (X_train, y_train) = self.train_data
        if self.verbose:
            print((' Trial %i: Start training model with sample_arc...' % trial))
        hist = self.model.fit(X_train, y_train, batch_size=self.batchsize, nsteps=nsteps, epochs=self.epochs, verbose=self.verbose)
        if self.store_fn:
            # NOTE(review): predictions here come from a randomly-activated
            # subgraph, inheriting the controller's stochastic behaviour.
            val_pred = self.model.predict(X_val, verbose=self.verbose)
            self.store_fn(trial=trial, model=self.model, hist=hist, data=self.validation_data, pred=val_pred, loss_and_metrics=None, working_dir=self.working_dir, save_full_model=self.save_full_model, knowledge_func=self.reward_fn.knowledge_function)
        # No reward is computed when training the shared DAG.
        return (None, None)
    else:
        # Fixed-architecture evaluation phase.
        model = self.model_fn(model_arc)
        (this_reward, loss_and_metrics, reward_metrics) = self.reward_fn(model, self.validation_data, session=self.session)
        # First element is the loss; the rest align with the compile metrics.
        loss = loss_and_metrics.pop(0)
        loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i] for i in range(len(loss_and_metrics))}
        loss_and_metrics['loss'] = loss
        if reward_metrics:
            loss_and_metrics.update(reward_metrics)
        if self.disable_controller:
            # Ablation mode: replace the learned reward with uniform noise.
            this_reward = np.random.uniform(0, 1)
        return (this_reward, loss_and_metrics)
|
def get_layer_shortname(layer):
    """Return a compact string identifier for a layer's operation.

    Useful for converting a Layer/Operation object to a string ID or for
    plot labels.

    Parameters
    ----------
    layer : amber.architect.Operation
        The ``Operation`` object for any layer; must expose ``Layer_type``
        and ``Layer_attributes``.

    Returns
    -------
    str
        The unique short name for this operation; falls back to
        ``str(layer)`` for unrecognized types.
    """
    op_type = layer.Layer_type
    attrs = layer.Layer_attributes
    if op_type == 'conv1d':
        short = ('conv_f%s_k%s_%s' % (attrs['filters'], attrs['kernel_size'], attrs['activation']))
        # Dilated convolutions get an extra suffix.
        if 'dilation' in attrs:
            short += ('_d%i' % attrs['dilation'])
        return short
    if op_type == 'denovo':
        return ('%s_f%s_k%s' % ('regconv2d', attrs['filters'], attrs['kernel_size']))
    if op_type == 'dense':
        return ('%s_u%s_%s' % (op_type, attrs['units'], attrs['activation']))
    # Parameter-free ops are identified by their type name alone.
    if op_type in ('maxpool1d', 'avgpool1d', 'flatten', 'identity',
                   'globalmaxpool1d', 'globalavgpool1d', 'sfc'):
        return op_type
    return str(layer)
|
class State(object):
    """AMBER's internal holder for a computational operation at any layer.

    Parameters
    ----------
    Layer_type : str
        The string for the operation type; supports most commonly used
        ``tf.keras.layers`` types. Matched case-insensitively.
    kwargs :
        Operation/layer specifications parsed through keyword arguments.

    Attributes
    ----------
    Layer_type : str
        Lower-cased operation type string.
    Layer_attributes : dict
        Key-value pairs for all specifications of this layer. Unspecified
        attributes use the ``tf.keras.layers`` defaults.

    Examples
    --------
    A 1D-convolution with ReLU activation, kernel size 8, 32 kernels::

        >>> from amber.architect import State
        >>> op = State("conv1d", filters=32, kernel_size=8, activation='relu')
    """

    def __init__(self, Layer_type, **kwargs):
        self.Layer_type = Layer_type.lower()
        self.Layer_attributes = kwargs

    def __str__(self):
        return '{}:{}'.format(self.Layer_type, self.Layer_attributes)

    def __eq__(self, other):
        # Comparing against a non-State used to raise AttributeError;
        # NotImplemented lets Python fall back to its default handling.
        if not isinstance(other, State):
            return NotImplemented
        return ((self.Layer_type == other.Layer_type) and
                (self.Layer_attributes == other.Layer_attributes))

    def __hash__(self):
        # Hash must agree with __eq__: use a *sorted tuple* of attribute
        # items so two equal States (same attrs, any insertion order) hash
        # identically. The previous implementation hashed a generator
        # object, which falls back to identity hashing and silently broke
        # dict/set membership for equal States.
        unrolled = tuple(sorted(self.Layer_attributes.items()))
        try:
            return hash((self.Layer_type, unrolled))
        except TypeError:
            # Unhashable attribute values (e.g. list-valued shapes):
            # fall back to their repr, keeping equal states equal-hashed.
            return hash((self.Layer_type, repr(unrolled)))
|
class ModelSpace():
    """Model Space constructor.

    Holds, for every layer, the list of candidate "states"/operations that a
    controller may choose from, and provides utilities to build, edit, and
    sample from that search space.

    One way to construct a space is to start empty and add layers::

        >>> model_space = ModelSpace()
        >>> for i in range(9):
        >>>     model_space.add_layer(i, [
        >>>         Operation('conv1d', filters=64, kernel_size=8, activation='relu'),
        >>>         Operation('identity', filters=64),
        >>>     ])

    Alternatively, use :meth:`from_dict` to construct from a dict or list.
    """

    def __init__(self, **kwargs):
        # layer_id -> list of candidate operations; defaultdict so that
        # add_state() can append to a not-yet-declared layer.
        self.state_space = defaultdict(list)

    def __str__(self):
        return 'StateSpace with {} layers and {} total combinations'.format(
            len(self.state_space), self.get_space_size())

    def __len__(self):
        return len(self.state_space)

    def __getitem__(self, layer_id):
        if layer_id < 0:
            layer_id = len(self.state_space) + layer_id
        # Membership test first: a plain defaultdict access would silently
        # create an empty layer instead of failing.
        if layer_id not in self.state_space:
            raise IndexError('layer_id out of range')
        return self.state_space[layer_id]

    def __setitem__(self, layer_id, layer_states):
        self.add_layer(layer_id, layer_states)

    def get_space_size(self):
        """Return the total model space size as the product of candidate
        operations across all layers; residual connections not counted."""
        size_ = 1
        for layer_id in self.state_space:
            size_ *= len(self.state_space[layer_id])
        return size_

    def add_state(self, layer_id, state):
        """Append a new state/operation ``state`` to layer ``layer_id``."""
        self.state_space[layer_id].append(state)

    def delete_state(self, layer_id, state_id):
        """Delete the operation at index ``state_id`` from layer ``layer_id``."""
        del self.state_space[layer_id][state_id]

    def add_layer(self, layer_id, layer_states=None):
        """Add (or replace) a layer's candidate operations.

        Parameters
        ----------
        layer_id : int
            Which layer to add; may be non-contiguous with existing layers.
        layer_states : list of amber.architect.Operation or None
            Candidate operations; None installs an empty layer.

        Returns
        -------
        bool
            Whether the model space is still contiguous after the edit.
        """
        self.state_space[layer_id] = [] if layer_states is None else layer_states
        return self._check_space_integrity()

    def delete_layer(self, layer_id):
        """Delete an entire layer; returns the contiguity check result."""
        del self.state_space[layer_id]
        return self._check_space_integrity()

    def _check_space_integrity(self):
        # Valid when layer ids are exactly 0..n-1. An empty space is
        # trivially valid; without the guard, max() of no keys raised
        # ValueError on a freshly-constructed ModelSpace.
        if not self.state_space:
            return True
        return (len(self.state_space) - 1) == max(self.state_space.keys())

    def print_state_space(self):
        """Print the model space in a simple layer-per-block layout."""
        for i in range(len(self.state_space)):
            print('Layer {}'.format(i))
            print('\n'.join([(' ' + str(x)) for x in self.state_space[i]]))
            print('-' * 10)
        return

    def get_random_model_states(self):
        """Return a random combination of operations, one per layer."""
        return [np.random.choice(self.state_space[i])
                for i in range(len(self.state_space))]

    @staticmethod
    def from_dict(d):
        """Create a ModelSpace from a dict or list of per-layer operation
        specifications.

        String-encoded ``'shape'`` entries are parsed with
        ``ast.literal_eval`` before building each ``State``.
        """
        import ast
        assert type(d) in (dict, list)
        ms = ModelSpace()
        for i in range(len(d)):
            for j in range(len(d[i])):
                if ('shape' in d[i][j]) and (type(d[i][j]['shape']) is str):
                    # BUGFIX: evaluate the serialized shape *in place*.
                    # The previous code replaced the entire op dict with the
                    # parsed tuple, making State(**d[i][j]) crash.
                    d[i][j]['shape'] = ast.literal_eval(d[i][j]['shape'])
            ms.add_layer(layer_id=i,
                         layer_states=[State(**spec) for spec in d[i]])
        return ms
|
class BranchedModelSpace(ModelSpace):
    """Model space composed of several input branches feeding a shared stem.

    Parameters
    ----------
    subspaces : list
        Two elements: the first is a list of input-branch model spaces, the
        second is the stem model space appended after all branches.
    concat_op : str
        String identifier for how the input branches are concatenated.
    """

    def __init__(self, subspaces, concat_op='concatenate', **kwargs):
        super().__init__(**kwargs)
        self.subspaces = subspaces
        self.concat_op = concat_op
        self._layer_to_branch = {}
        self._branch_to_layer = {}
        # Flatten the branch subspaces first, then the stem, into one
        # consecutively-numbered global state space. Each global layer id
        # maps to (0, branch_index) for branch layers, (1, None) for stem.
        next_id = 0
        branches, stem = self.subspaces[0], self.subspaces[1]
        for branch_idx, branch_space in enumerate(branches):
            for local_layer in range(len(branch_space)):
                self.state_space[next_id] = branch_space[local_layer]
                self._layer_to_branch[next_id] = (0, branch_idx)
                next_id += 1
        for local_layer in range(len(stem)):
            self.state_space[next_id] = stem[local_layer]
            self._layer_to_branch[next_id] = (1, None)
            next_id += 1
        # Invert the mapping: branch key -> list of global layer ids.
        for global_id, branch_key in self._layer_to_branch.items():
            self._branch_to_layer.setdefault(branch_key, []).append(global_id)

    @property
    def layer_to_branch(self):
        return self._layer_to_branch

    @property
    def branch_to_layer(self):
        return self._branch_to_layer
|
class MultiInputController(GeneralController):
    """Controller that, besides layer operations and skip connections, also
    samples which *input feature blocks* connect to each layer.

    Derived from ``GeneralController``. Because the input-block selection is
    inherently embedded in the NAS cell roll-out, ``_build_sampler`` and
    ``_build_trainer`` are overwritten wholesale.

    Parameters
    ----------
    model_space :
        The per-layer candidate operation space.
    with_skip_connection : bool
        Whether to sample residual/skip connections between layers.
    with_input_blocks : bool
        Whether to sample per-layer input-block connections.
    share_embedding : dict
        Defines which child-net layers share softmax and embedding weights
        during controller training and sampling.
    use_ppo_loss : bool
        Use PPO loss instead of REINFORCE.
    kl_threshold : float
        Training-loop KL threshold (see parent class).
    num_input_blocks : int
        Number of input feature blocks to route into the network.
    input_block_unique_connection : bool
        If True, each input block connects to exactly one layer: already
        connected blocks are masked out, and at the last layer any
        still-unconnected blocks are forced to connect.
    skip_target : float
        Expected proportion of 1's among sampled skip connections.
    skip_weight : float
        Weight for the skip-connection KL divergence penalty.
    name : str
        Name for the instance; also used for tensor variable scopes.

    NOTE(review): the remaining constructor arguments are forwarded verbatim
    to ``GeneralController``; see its documentation for their semantics.

    Attributes
    ----------
    g_emb : tf.Tensor
        Learned initial controller input embedding (created by parent).

    TODO
    ----
    Evaluate how different ways of connecting inputs affect search
    performance; e.g. connect input before the operation, or after?
    """

    def __init__(self, model_space, buffer_type='ordinal', with_skip_connection=True, with_input_blocks=True, share_embedding=None, use_ppo_loss=False, kl_threshold=0.05, num_input_blocks=2, input_block_unique_connection=True, skip_connection_unique_connection=False, buffer_size=15, batch_size=5, session=None, train_pi_iter=20, lstm_size=32, lstm_num_layers=2, lstm_keep_prob=1.0, tanh_constant=None, temperature=None, skip_target=0.8, skip_weight=0.5, optim_algo='adam', name='controller', *args, **kwargs):
        # Input-block attributes must exist before super().__init__ builds
        # the sampler/trainer graphs, which read them.
        self.with_input_blocks = with_input_blocks
        self.num_input_blocks = num_input_blocks
        self.input_block_unique_connection = input_block_unique_connection
        super().__init__(model_space=model_space, buffer_type=buffer_type, with_skip_connection=with_skip_connection, share_embedding=share_embedding, use_ppo_loss=use_ppo_loss, kl_threshold=kl_threshold, skip_connection_unique_connection=skip_connection_unique_connection, buffer_size=buffer_size, batch_size=batch_size, session=session, train_pi_iter=train_pi_iter, lstm_size=lstm_size, lstm_num_layers=lstm_num_layers, lstm_keep_prob=lstm_keep_prob, tanh_constant=tanh_constant, temperature=temperature, optim_algo=optim_algo, skip_target=skip_target, skip_weight=skip_weight, name=name, **kwargs)

    def _create_weight(self):
        """Create parent weights plus the input-block embedding/softmax."""
        super()._create_weight()
        if self.with_input_blocks:
            # Embedding for sampled input-block bitmasks, and the softmax
            # projection that scores each input block.
            with tf.variable_scope('input', initializer=tf.random_uniform_initializer(minval=(- 0.1), maxval=0.1)):
                self.input_emb = tf.get_variable('inp_emb', [self.num_input_blocks, self.lstm_size])
                self.w_soft['input'] = tf.get_variable('w_input', [self.lstm_size, self.num_input_blocks])

    def _build_sampler(self):
        """Build the sampler ops and the log_prob ops."""
        anchors = []
        anchors_w_1 = []
        arc_seq = []
        hidden_states = []
        entropys = []
        probs_ = []
        log_probs = []
        skip_count = []
        skip_penaltys = []
        # Zero initial LSTM state; the learned g_emb is the first input.
        prev_c = [tf.zeros([1, self.lstm_size], tf.float32) for _ in range(self.lstm_num_layers)]
        prev_h = [tf.zeros([1, self.lstm_size], tf.float32) for _ in range(self.lstm_num_layers)]
        inputs = self.g_emb
        skip_targets = tf.constant([(1.0 - self.skip_target), self.skip_target], dtype=tf.float32)
        input_block_record = []
        skip_conn_record = []
        for layer_id in range(self.num_layers):
            # --- 1) sample the operation token for this layer ---
            (next_c, next_h) = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
            (prev_c, prev_h) = (next_c, next_h)
            hidden_states.append(prev_h)
            logit = tf.matmul(next_h[(- 1)], self.w_soft['start'][layer_id])
            if (self.temperature is not None):
                logit /= self.temperature
            if (self.tanh_constant is not None):
                logit = (self.tanh_constant * tf.tanh(logit))
            probs_.append(tf.nn.softmax(logit))
            start = tf.multinomial(logit, 1)
            start = tf.to_int32(start)
            start = tf.reshape(start, [1])
            arc_seq.append(start)
            log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=start)
            log_probs.append(log_prob)
            entropy = tf.stop_gradient((log_prob * tf.exp((- log_prob))))
            entropys.append(entropy)
            inputs = tf.nn.embedding_lookup(self.w_emb['start'][layer_id], start)
            if self.with_input_blocks:
                # --- 2) sample which input blocks connect to this layer ---
                (next_c, next_h) = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
                (prev_c, prev_h) = (next_c, next_h)
                hidden_states.append(prev_h)
                block_query = tf.reshape(tf.matmul(next_h[(- 1)], self.w_soft['input']), (self.num_input_blocks, 1))
                if (layer_id != (self.num_layers - 1)):
                    if (self.input_block_unique_connection and (layer_id > 0)):
                        # Mask out blocks already connected in earlier
                        # layers by pushing their logits to -10000.
                        mask = tf.stop_gradient(tf.reduce_sum(tf.stack(input_block_record), axis=0))
                        mask = tf.reshape(mask, [self.num_input_blocks, 1])
                        mask1 = tf.greater(mask, 0)
                        block_query = tf.where(mask1, y=block_query, x=tf.fill(tf.shape(block_query), (- 10000.0)))
                else:
                    # Last layer: force every still-unconnected block to
                    # connect (logit +10000 where the running count is 0).
                    mask = tf.stop_gradient(tf.reduce_sum(tf.stack(input_block_record), axis=0))
                    mask = tf.reshape(mask, [self.num_input_blocks, 1])
                    mask2 = tf.equal(mask, 0)
                    block_query = tf.where(mask2, y=block_query, x=tf.fill(tf.shape(block_query), 10000.0))
                # Binary (no/yes) logits per input block.
                logit = tf.concat([(- block_query), block_query], axis=1)
                if (self.temperature is not None):
                    logit /= self.temperature
                if (self.tanh_constant is not None):
                    logit = (self.tanh_constant * tf.tanh(logit))
                probs_.append(tf.expand_dims(tf.nn.softmax(logit), axis=0))
                input_block = tf.multinomial(logit, 1)
                input_block = tf.to_int32(input_block)
                input_block = tf.reshape(input_block, [self.num_input_blocks])
                arc_seq.append(input_block)
                input_block_record.append(input_block)
                log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=input_block)
                log_probs.append(tf.reshape(tf.reduce_sum(log_prob), [(- 1)]))
                entropy = tf.stop_gradient(tf.reshape(tf.reduce_sum((log_prob * tf.exp((- log_prob)))), [(- 1)]))
                entropys.append(entropy)
                # Average the embeddings of the selected blocks to form the
                # next LSTM input.
                inputs = tf.cast(tf.reshape(input_block, (1, self.num_input_blocks)), tf.float32)
                inputs /= (1.0 + tf.cast(tf.reduce_sum(input_block), tf.float32))
                inputs = tf.matmul(inputs, self.input_emb)
            if self.with_skip_connection:
                if (layer_id > 0):
                    # --- 3) sample skip connections via attention over the
                    # anchors of all previous layers ---
                    (next_c, next_h) = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
                    (prev_c, prev_h) = (next_c, next_h)
                    hidden_states.append(prev_h)
                    query = tf.concat(anchors_w_1, axis=0)
                    query = tf.tanh((query + tf.matmul(next_h[(- 1)], self.w_attn_2)))
                    query = tf.matmul(query, self.v_attn)
                    if self.skip_connection_unique_connection:
                        # Suppress source layers already used by a skip.
                        mask = tf.stop_gradient(tf.reduce_sum(tf.stack(skip_conn_record), axis=0))
                        mask = tf.slice(mask, begin=[0], size=[layer_id])
                        mask1 = tf.greater(mask, 0)
                        query = tf.where(mask1, y=query, x=tf.fill(tf.shape(query), (- 10000.0)))
                    logit = tf.concat([(- query), query], axis=1)
                    if (self.temperature is not None):
                        logit /= self.temperature
                    if (self.tanh_constant is not None):
                        logit = (self.tanh_constant * tf.tanh(logit))
                    probs_.append(tf.expand_dims(tf.nn.softmax(logit), axis=0))
                    skip = tf.multinomial(logit, 1)
                    skip = tf.to_int32(skip)
                    skip = tf.reshape(skip, [layer_id])
                    arc_seq.append(skip)
                    skip_conn_record.append(tf.concat([tf.cast(skip, tf.float32), tf.zeros((self.num_layers - layer_id))], axis=0))
                    # KL penalty pulling the skip rate toward skip_target.
                    skip_prob = tf.sigmoid(logit)
                    kl = (skip_prob * tf.log((skip_prob / skip_targets)))
                    kl = tf.reduce_sum(kl)
                    skip_penaltys.append(kl)
                    log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=skip)
                    log_probs.append(tf.reshape(tf.reduce_sum(log_prob), [(- 1)]))
                    entropy = tf.stop_gradient(tf.reshape(tf.reduce_sum((log_prob * tf.exp((- log_prob)))), [(- 1)]))
                    entropys.append(entropy)
                    # Average the anchors of the chosen skip sources to form
                    # the next LSTM input.
                    skip = tf.to_float(skip)
                    skip = tf.reshape(skip, [1, layer_id])
                    skip_count.append(tf.reduce_sum(skip))
                    inputs = tf.matmul(skip, tf.concat(anchors, axis=0))
                    inputs /= (1.0 + tf.reduce_sum(skip))
                else:
                    # NOTE(review): tf.zeros(self.num_layers, 1) passes 1 in
                    # the dtype slot -- presumably intended as a float32 zero
                    # vector of length num_layers; confirm against TF1 enum.
                    skip_conn_record.append(tf.zeros(self.num_layers, 1))
            anchors.append(next_h[(- 1)])
            anchors_w_1.append(tf.matmul(next_h[(- 1)], self.w_attn_1))
        self.anchors = anchors
        self.anchors_w_1 = anchors_w_1
        self.sample_hidden_states = hidden_states
        # Flatten per-layer tokens into a single 1-D architecture sequence.
        arc_seq = tf.concat(arc_seq, axis=0)
        self.sample_arc = tf.reshape(arc_seq, [(- 1)])
        entropys = tf.stack(entropys)
        self.sample_entropy = tf.reduce_sum(entropys)
        log_probs = tf.stack(log_probs)
        self.sample_log_prob = tf.reduce_sum(log_probs)
        skip_count = tf.stack(skip_count)
        self.skip_count = tf.reduce_sum(skip_count)
        skip_penaltys = tf.stack(skip_penaltys)
        self.skip_penaltys = tf.reduce_mean(skip_penaltys)
        self.sample_probs = probs_

    def _build_trainer(self):
        """Build the trainer ops: replay a *batch* of stored architectures
        through the same roll-out as the sampler, computing per-sample
        log-probs, entropies and skip penalties for the policy update."""
        anchors = []
        anchors_w_1 = []
        probs_ = []
        ops_each_layer = 1
        # Architecture length: per layer, one op token, num_input_blocks
        # input-block bits (if enabled), and layer_id skip bits (if enabled).
        total_arc_len = sum(([(ops_each_layer + (self.num_input_blocks * self.with_input_blocks))] + [((ops_each_layer + (self.num_input_blocks * self.with_input_blocks)) + (i * self.with_skip_connection)) for i in range(1, self.num_layers)]))
        self.total_arc_len = total_arc_len
        # One (batch, 1) int placeholder per architecture position.
        self.input_arc = [tf.placeholder(shape=(None, 1), dtype=tf.int32, name='arc_{}'.format(i)) for i in range(total_arc_len)]
        batch_size = tf.shape(self.input_arc[0])[0]
        entropys = []
        log_probs = []
        skip_count = []
        skip_penaltys = []
        prev_c = [tf.zeros([batch_size, self.lstm_size], tf.float32) for _ in range(self.lstm_num_layers)]
        prev_h = [tf.zeros([batch_size, self.lstm_size], tf.float32) for _ in range(self.lstm_num_layers)]
        # Broadcast g_emb across the batch as the first input.
        inputs = tf.matmul(tf.ones((batch_size, 1)), self.g_emb)
        skip_targets = tf.constant([(1.0 - self.skip_target), self.skip_target], dtype=tf.float32)
        # arc_pointer walks through input_arc as layers are consumed.
        arc_pointer = 0
        input_block_record = []
        skip_conn_record = []
        hidden_states = []
        for layer_id in range(self.num_layers):
            # --- 1) operation token (read from input_arc, not sampled) ---
            (next_c, next_h) = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
            (prev_c, prev_h) = (next_c, next_h)
            hidden_states.append(prev_h)
            logit = tf.matmul(next_h[(- 1)], self.w_soft['start'][layer_id])
            if (self.temperature is not None):
                logit /= self.temperature
            if (self.tanh_constant is not None):
                logit = (self.tanh_constant * tf.tanh(logit))
            start = self.input_arc[arc_pointer]
            start = tf.reshape(start, [batch_size])
            probs_.append(tf.nn.softmax(logit))
            log_prob1 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=start)
            log_probs.append(log_prob1)
            entropy = tf.stop_gradient((log_prob1 * tf.exp((- log_prob1))))
            entropys.append(entropy)
            inputs = tf.nn.embedding_lookup(self.w_emb['start'][layer_id], start)
            if self.with_input_blocks:
                # --- 2) input-block bits (read from input_arc) ---
                (next_c, next_h) = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
                (prev_c, prev_h) = (next_c, next_h)
                hidden_states.append(prev_h)
                block_query = tf.reshape(tf.matmul(next_h[(- 1)], self.w_soft['input']), ((self.num_input_blocks * batch_size), 1))
                if (layer_id != (self.num_layers - 1)):
                    if (self.input_block_unique_connection and (layer_id > 0)):
                        # Mask out already-connected blocks, as in sampler.
                        mask = tf.stop_gradient(tf.reduce_sum(tf.stack(input_block_record), axis=0))
                        mask = tf.reshape(mask, [(self.num_input_blocks * batch_size), 1])
                        mask1 = tf.greater(mask, 0)
                        block_query = tf.where(mask1, y=block_query, x=tf.fill(tf.shape(block_query), (- 10000.0)))
                else:
                    # Last layer: force still-unconnected blocks to connect.
                    mask = tf.stop_gradient(tf.reduce_sum(tf.stack(input_block_record), axis=0))
                    mask = tf.reshape(mask, [(self.num_input_blocks * batch_size), 1])
                    mask2 = tf.equal(mask, 0)
                    block_query = tf.where(mask2, y=block_query, x=tf.fill(tf.shape(block_query), 10000.0))
                logit = tf.concat([(- block_query), block_query], axis=1)
                if (self.temperature is not None):
                    logit /= self.temperature
                if (self.tanh_constant is not None):
                    logit = (self.tanh_constant * tf.tanh(logit))
                probs_.append(tf.reshape(tf.nn.softmax(logit), [batch_size, self.num_input_blocks, 2]))
                input_block = self.input_arc[(arc_pointer + ops_each_layer):((arc_pointer + ops_each_layer) + self.num_input_blocks)]
                input_block = tf.reshape(tf.transpose(input_block), [(batch_size * self.num_input_blocks)])
                input_block_record.append(input_block)
                log_prob2 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=input_block)
                log_prob2 = tf.reshape(log_prob2, [batch_size, (- 1)])
                log_probs.append(tf.reduce_sum(log_prob2, axis=1))
                entropy = tf.stop_gradient(tf.reshape(tf.reduce_sum((log_prob2 * tf.exp((- log_prob2)))), [(- 1)]))
                entropys.append(entropy)
                # Per-sample average of the selected block embeddings.
                inputs = tf.cast(tf.reshape(input_block, (batch_size, self.num_input_blocks)), tf.float32)
                inputs /= tf.matmul(tf.reshape((1.0 + tf.cast(tf.reduce_sum(tf.reshape(input_block, (batch_size, self.num_input_blocks)), axis=1), tf.float32)), ((- 1), 1)), tf.ones((1, self.num_input_blocks), dtype=tf.float32))
                inputs = tf.matmul(inputs, self.input_emb)
            if self.with_skip_connection:
                if (layer_id > 0):
                    # --- 3) skip bits (read from input_arc) ---
                    (next_c, next_h) = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)
                    (prev_c, prev_h) = (next_c, next_h)
                    hidden_states.append(prev_h)
                    query = tf.transpose(tf.stack(anchors_w_1), [1, 0, 2])
                    query = tf.tanh((query + tf.expand_dims(tf.matmul(next_h[(- 1)], self.w_attn_2), axis=1)))
                    query = tf.reshape(query, ((batch_size * layer_id), self.lstm_size))
                    query = tf.matmul(query, self.v_attn)
                    if self.skip_connection_unique_connection:
                        mask = tf.stop_gradient(tf.reduce_sum(tf.stack(skip_conn_record), axis=0))
                        mask = tf.slice(mask, begin=[0, 0], size=[batch_size, layer_id])
                        mask = tf.reshape(mask, ((batch_size * layer_id), 1))
                        mask1 = tf.greater(mask, 0)
                        query = tf.where(mask1, y=query, x=tf.fill(tf.shape(query), (- 10000.0)))
                    logit = tf.concat([(- query), query], axis=1)
                    if (self.temperature is not None):
                        logit /= self.temperature
                    if (self.tanh_constant is not None):
                        logit = (self.tanh_constant * tf.tanh(logit))
                    probs_.append(tf.reshape(tf.nn.softmax(logit), [batch_size, layer_id, 2]))
                    # Skip bits follow the input-block bits (when present)
                    # within this layer's slice of input_arc.
                    if self.with_input_blocks:
                        skip = self.input_arc[((arc_pointer + ops_each_layer) + self.num_input_blocks):(((arc_pointer + ops_each_layer) + self.num_input_blocks) + layer_id)]
                    else:
                        skip = self.input_arc[(arc_pointer + ops_each_layer):((arc_pointer + ops_each_layer) + layer_id)]
                    skip = tf.reshape(tf.transpose(skip), [(batch_size * layer_id)])
                    skip = tf.to_int32(skip)
                    # KL penalty toward the expected skip rate.
                    skip_prob = tf.sigmoid(logit)
                    kl = (skip_prob * tf.log((skip_prob / skip_targets)))
                    kl = tf.reduce_sum(kl, axis=1)
                    kl = tf.reshape(kl, [batch_size, (- 1)])
                    skip_penaltys.append(kl)
                    log_prob3 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=skip)
                    log_prob3 = tf.reshape(log_prob3, [batch_size, (- 1)])
                    log_probs.append(tf.reduce_sum(log_prob3, axis=1))
                    entropy = tf.stop_gradient(tf.reduce_sum((log_prob3 * tf.exp((- log_prob3))), axis=1))
                    entropys.append(entropy)
                    # Per-sample average of the chosen skip-source anchors.
                    skip = tf.to_float(skip)
                    skip = tf.reshape(skip, [batch_size, 1, layer_id])
                    skip_count.append(tf.reduce_sum(skip, axis=2))
                    anchors_ = tf.stack(anchors)
                    anchors_ = tf.transpose(anchors_, [1, 0, 2])
                    inputs = tf.matmul(skip, anchors_)
                    inputs = tf.squeeze(inputs, axis=1)
                    inputs /= (1.0 + tf.reduce_sum(skip, axis=2))
                else:
                    skip_conn_record.append(tf.zeros((batch_size, self.num_layers)))
            anchors.append(next_h[(- 1)])
            anchors_w_1.append(tf.matmul(next_h[(- 1)], self.w_attn_1))
            # Advance past this layer's slice of input_arc.
            arc_pointer += ((ops_each_layer + (layer_id * self.with_skip_connection)) + (self.num_input_blocks * self.with_input_blocks))
        self.train_hidden_states = hidden_states
        self.entropys = tf.stack(entropys)
        self.onehot_probs = probs_
        log_probs = tf.stack(log_probs)
        self.onehot_log_prob = tf.reduce_sum(log_probs, axis=0)
        skip_count = tf.stack(skip_count)
        self.onehot_skip_count = tf.reduce_sum(skip_count, axis=0)
        skip_penaltys_flat = [tf.reduce_mean(x, axis=1) for x in skip_penaltys]
        self.onehot_skip_penaltys = tf.reduce_mean(skip_penaltys_flat, axis=0)
|
class MultiIOController(MultiInputController):
    """Controller with both multiple input blocks and multiple output blocks.

    On top of ``MultiInputController``'s roll-out, each of the
    ``num_output_blocks`` output blocks additionally chooses which layer(s)
    it connects to, scored from the per-layer operation hidden states.

    Example
    -------
    >>> from BioNAS.MockBlackBox.dense_skipcon_space import get_model_space
    >>> from BioNAS.Controller.multiio_controller import MultiIOController
    >>> import numpy as np
    >>> model_space = get_model_space(5)
    >>> controller = MultiIOController(model_space, output_block_unique_connection=True)
    >>> a1, p1 = controller.get_action()
    >>> a2, p2 = controller.get_action()
    >>> a_batch = np.array([a1, a2])
    >>> p_batch = [np.concatenate(x) for x in zip(*[p1, p2])]

    Notes
    -----
    Placeholder for now.
    """

    def __init__(self, num_output_blocks=2, with_output_blocks=True, output_block_unique_connection=True, output_block_diversity_weight=None, **kwargs):
        self.with_output_blocks = with_output_blocks
        self.num_output_blocks = num_output_blocks
        skip_weight = (kwargs['skip_weight'] if ('skip_weight' in kwargs) else None)
        if (output_block_diversity_weight is not None):
            assert (skip_weight is not None), 'Cannot use output_block_diversity_weight when skip_weight is None'
            # Pre-divide by skip_weight: the diversity term is folded into
            # onehot_skip_penaltys, which the parent later scales by
            # skip_weight -- presumably this cancels to the intended weight;
            # verify against the parent's loss construction.
            self.output_block_diversity_weight = (output_block_diversity_weight / skip_weight)
        else:
            self.output_block_diversity_weight = None
        self.output_block_unique_connection = output_block_unique_connection
        super().__init__(**kwargs)
        assert (self.with_skip_connection is True), 'Must have with_skip_connection=True for MultiIOController'

    def _create_weight(self):
        """Create parent weights plus one scoring vector per output block."""
        super()._create_weight()
        with tf.variable_scope('outputs', initializer=tf.random_uniform_initializer(minval=(- 0.1), maxval=0.1)):
            self.w_soft['output'] = []
            for i in range(self.num_output_blocks):
                self.w_soft['output'].append(tf.get_variable(('output_block_%i' % i), [self.lstm_size, 1]))

    def _build_sampler(self):
        """Extend the parent sampler with output-block connection sampling."""
        super()._build_sampler()
        # Each layer contributes `step_size` hidden states to the parent
        # roll-out (op [+ input-block] [+ skip]); pick the one per layer
        # belonging to the operation step.
        step_size = ((1 + int(self.with_input_blocks)) + int(self.with_skip_connection))
        layer_hs = [self.sample_hidden_states[i][(- 1)] for i in range(0, ((self.num_layers * step_size) - 1), step_size)]
        layer_hs = tf.concat(layer_hs, axis=0)
        output_probs = []
        output_onehot = []
        output_log_probs = []
        for i in range(self.num_output_blocks):
            logit = tf.matmul(layer_hs, self.w_soft['output'][i])
            if (self.temperature is not None):
                logit /= self.temperature
            if (self.tanh_constant is not None):
                logit = (self.tanh_constant * tf.tanh(logit))
            if self.output_block_unique_connection:
                # Each output block connects to exactly one layer (softmax
                # over layers, encoded as a one-hot vector).
                output_label = tf.reshape(tf.multinomial(tf.transpose(logit), 1), [(- 1)])
                output = tf.one_hot(output_label, self.num_layers)
                prob = tf.nn.softmax(tf.squeeze(logit))
                prob = tf.reshape(prob, [1, (- 1)])
                log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=tf.transpose(logit), labels=output_label)
            else:
                # Independent Bernoulli connection per layer.
                logit_ = tf.concat([(- logit), logit], axis=1)
                output = tf.squeeze(tf.multinomial(logit_, 1))
                prob = tf.nn.sigmoid(logit_)
                prob = tf.reshape(prob, [1, (- 1), 2])
                log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit_, labels=output)
            output_onehot.append(tf.cast(output, tf.int32))
            output_probs.append(prob)
            output_log_probs.append(log_prob)
        self.sample_probs.extend(output_probs)
        # Append output-block choices to the architecture sequence.
        self.sample_arc = tf.concat([self.sample_arc, tf.reshape(output_onehot, [(- 1)])], axis=0)
        self.sample_log_prob += tf.reduce_sum(output_log_probs)

    def _build_trainer(self):
        """Extend the parent trainer with output-block placeholders/log-probs
        and (optionally) the output diversity penalty."""
        super()._build_trainer()
        output_arc_len = (self.num_layers * self.num_output_blocks)
        # Extend input_arc with the output-block portion of the sequence.
        self.input_arc += [tf.placeholder(shape=(None, 1), dtype=tf.int32, name='arc_{}'.format(i)) for i in range(self.total_arc_len, (self.total_arc_len + output_arc_len))]
        self.total_arc_len += output_arc_len
        step_size = ((1 + int(self.with_input_blocks)) + int(self.with_skip_connection))
        layer_hs = [self.train_hidden_states[i][(- 1)] for i in range(0, ((self.num_layers * step_size) - 1), step_size)]
        layer_hs = tf.transpose(tf.stack(layer_hs), [1, 0, 2])
        output_probs = []
        output_log_probs = []
        for i in range(self.num_output_blocks):
            logit = tf.matmul(layer_hs, self.w_soft['output'][i])
            if (self.temperature is not None):
                logit /= self.temperature
            if (self.tanh_constant is not None):
                logit = (self.tanh_constant * tf.tanh(logit))
            # This block's slice of the stored output-connection bits.
            output = self.input_arc[(- output_arc_len):][(self.num_layers * i):(self.num_layers * (i + 1))]
            output = tf.transpose(tf.squeeze(tf.stack(output), axis=(- 1)))
            if self.output_block_unique_connection:
                logit = tf.transpose(logit, [0, 2, 1])
                prob = tf.squeeze(tf.nn.softmax(logit), axis=1)
                log_prob = tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=output)
            else:
                logit_ = tf.concat([(- logit), logit], axis=2)
                prob = tf.nn.sigmoid(logit_)
                log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit_, labels=output)
                log_prob = tf.reshape(tf.reduce_mean(log_prob, axis=(- 1)), [(- 1), 1])
            output_probs.append(prob)
            output_log_probs.append(log_prob)
        self.onehot_probs.extend(output_probs)
        output_log_probs = tf.squeeze(tf.transpose(tf.stack(output_log_probs), [1, 0, 2]), axis=(- 1))
        self.onehot_log_prob += tf.reduce_sum(output_log_probs, axis=1)
        if (self.output_block_diversity_weight is not None):
            # Reward (negative penalty) for output blocks whose connection
            # distributions differ: std across blocks, averaged over layers.
            output_probs = tf.transpose(tf.stack(output_probs), [1, 0, 2])
            diversity_penaltys = tf.math.reduce_std(output_probs, axis=1)
            diversity_penaltys = tf.reduce_mean(diversity_penaltys, axis=1)
            self.onehot_skip_penaltys -= (diversity_penaltys * self.output_block_diversity_weight)
|
def get_store_fn(arg):
    """The getter function that returns a callable store function from a string.

    Parameters
    ----------
    arg : str, callable, or None
        The string identifier for a particular store function. Current choices are:
            - general
            - regression
            - hessian
            - model_plot
            - minimal
        Both the short alias (e.g. ``'general'``) and the full function name
        (e.g. ``'store_general'``) are accepted, case-insensitively.
        A callable is returned unchanged; None is passed through.

    Returns
    -------
    callable or None
        A callable store function, or None if ``arg`` is None.

    Raises
    ------
    ValueError
        If ``arg`` is a string matching none of the known identifiers.
    """
    if callable(arg):
        return arg
    if arg is None:
        return None
    # Normalize once instead of calling .lower() in every branch.
    key = arg.lower()
    if key in ('store_general', 'general'):
        return store_general
    if key in ('store_regression', 'regression'):
        return store_regression
    if key in ('store_with_hessian', 'hessian'):
        return store_with_hessian
    if key in ('store_with_model_plot', 'model_plot'):
        return store_with_model_plot
    if key in ('store_minimal', 'minimal'):
        return store_minimal
    # ValueError is a subclass of Exception, so existing callers that
    # caught the previous generic Exception still work.
    raise ValueError('cannot understand store_fn: %s' % arg)
|
def store_with_model_plot(trial, model, hist, data, pred, loss_and_metrics, working_dir='.', save_full_model=False, *args, **kwargs):
    """Store trial results via ``store_general`` and additionally render the
    model architecture to ``model_arc.png`` in the trial's weights folder.
    """
    trial_dir = os.path.join(working_dir, 'weights', 'trial_%s' % trial)
    os.makedirs(trial_dir, exist_ok=True)
    # Delegate the standard bookkeeping (history plot, predictions, weights).
    store_general(trial=trial, model=model, hist=hist, data=data, pred=pred,
                  loss_and_metrics=loss_and_metrics, working_dir=working_dir,
                  save_full_model=save_full_model)
    # Imported lazily so TensorFlow is only pulled in when plotting runs.
    from tensorflow.keras.utils import plot_model
    plot_model(model,
               to_file=os.path.join(trial_dir, 'model_arc.png'),
               show_shapes=True,
               show_layer_names=True)
|
def store_with_hessian(trial, model, hist, data, pred, loss_and_metrics, working_dir='.', save_full_model=False, knowledge_func=None):
    """Store trial results via ``store_general``, plot the model architecture,
    and plot the Hessian of the given knowledge function.

    Parameters
    ----------
    knowledge_func : callable
        The knowledge function whose Hessian is plotted; required. It is
        reset via its ``_reset()`` method after plotting.

    Raises
    ------
    AssertionError
        If ``knowledge_func`` is None.
    """
    # Fixed message typo ("theknowledge" -> "the knowledge").
    assert (knowledge_func is not None), '`store_with_hessian` requires parsing the knowledge function used.'
    par_dir = os.path.join(working_dir, 'weights', ('trial_%s' % trial))
    # store_general creates (and cleans) par_dir before anything is written there.
    store_general(trial=trial, model=model, hist=hist, data=data, pred=pred, loss_and_metrics=loss_and_metrics, working_dir=working_dir, save_full_model=save_full_model)
    # Use the TF-bundled Keras for consistency with `store_with_model_plot`.
    from tensorflow.keras.utils import plot_model
    plot_model(model, to_file=os.path.join(par_dir, 'model_arc.png'))
    plot_hessian(knowledge_func, os.path.join(par_dir, 'hess.png'))
    knowledge_func._reset()
    # Close all open matplotlib figures to avoid leaking memory across trials.
    plt.close('all')
    return
|
def store_general(trial, model, hist, data, pred, loss_and_metrics, working_dir='.', save_full_model=False, *args, **kwargs):
    """Default store function: write model weights, training-history plot, and
    a prediction/observation table into ``working_dir/weights/trial_<trial>``.

    Parameters
    ----------
    trial : int or str
        Trial identifier used to name the output folder.
    model : keras.Model
        The trained child model; saved fully only when ``save_full_model``.
    hist
        Training history object passed to ``plot_training_history``.
    data
        Validation data; unpacked via ``unpack_data`` to recover observations
        and optional metadata.
    pred
        Model predictions written alongside observations.
    loss_and_metrics : dict
        Metrics written as a header in the prediction file.
    working_dir : str
        Root directory for outputs.
    save_full_model : bool
        If True, also save the full model as ``full_bestmodel.h5``.
    """
    par_dir = os.path.join(working_dir, 'weights', 'trial_%s' % trial)
    # Start from a clean trial directory so stale artifacts never survive.
    if os.path.isdir(par_dir):
        shutil.rmtree(par_dir)
    os.makedirs(par_dir)
    if save_full_model:
        model.save(os.path.join(par_dir, 'full_bestmodel.h5'))
    # Plotting is best-effort; a rendering failure must not abort the trial.
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
    try:
        plot_training_history(hist, par_dir)
    except Exception:
        pass
    # The manager leaves the best weights in temp_network.h5; claim them.
    if os.path.isfile(os.path.join(working_dir, 'temp_network.h5')):
        shutil.move(os.path.join(working_dir, 'temp_network.h5'), os.path.join(par_dir, 'bestmodel.h5'))
    data = unpack_data(data, unroll_generator_y=True)
    # unpack_data returns (x, y[, metadata]); metadata is optional.
    metadata = (data[2] if (len(data) > 2) else None)
    obs = data[1]
    write_pred_to_disk(os.path.join(par_dir, 'pred.txt'), pred, obs, metadata, loss_and_metrics)
|
def store_minimal(trial, model, working_dir='.', save_full_model=False, **kwargs):
    """Minimal store function: keep only the model weights for this trial,
    skipping plots and prediction tables.
    """
    trial_dir = os.path.join(working_dir, 'weights', 'trial_%s' % trial)
    # Wipe any leftovers from a previous run of the same trial id.
    if os.path.isdir(trial_dir):
        shutil.rmtree(trial_dir)
    os.makedirs(trial_dir)
    if save_full_model:
        model.save(os.path.join(trial_dir, 'full_bestmodel.h5'))
    # Claim the manager's temporary best-weights file, if present.
    temp_weights = os.path.join(working_dir, 'temp_network.h5')
    if os.path.isfile(temp_weights):
        shutil.move(temp_weights, os.path.join(trial_dir, 'bestmodel.h5'))
|
def write_pred_to_disk(fn, y_pred, y_obs, metadata=None, metrics=None):
    """Write predictions and observations (plus optional per-sample metadata)
    to a tab-separated file, optionally prefixed by '# metric: value' lines.
    """
    with open(fn, 'w') as fh:
        # Optional metric header, one commented line per metric.
        if metrics is not None:
            header = ['# {}: {}'.format(k, metrics[k]) for k in metrics]
            fh.write('\n'.join(header) + '\n')
        # Multi-output models hand over lists of arrays; stack column-wise.
        if type(y_pred) is list:
            y_pred = np.concatenate(y_pred, axis=1)
            y_obs = np.concatenate(y_obs, axis=1)
        # Few distinct observed values -> format observations as integers.
        obs_fmt = '%i' if len(np.unique(y_obs)) < 10 else '%.3f'
        fh.write('pred\tobs\tmetadata\n')
        for idx in range(len(y_pred)):
            pred_row = y_pred[idx]
            obs_row = y_obs[idx]
            if len(pred_row.shape) > 1 or pred_row.shape[0] > 1:
                # Multi-column row: comma-join the individual entries.
                pred_str = ','.join(['%.3f' % v for v in np.array(pred_row)])
                obs_str = ','.join([obs_fmt % v for v in np.array(obs_row)])
            else:
                pred_str = '%.3f' % pred_row
                obs_str = obs_fmt % obs_row
            meta_str = metadata[idx] if metadata else 'NA'
            fh.write('%s\t%s\t%s\n' % (pred_str, obs_str, meta_str))
|
def get_controller_states(model):
    """Snapshot the current values of the model's recurrent state variables."""
    states = []
    for state_var, _ in model.state_updates:
        states.append(K.get_value(state_var))
    return states
|
def set_controller_states(model, states):
    """Restore recurrent state variables from a previously captured snapshot."""
    for (dest, _), value in zip(model.state_updates, states):
        K.set_value(dest, value)
|
def get_controller_history(fn='train_history.csv'):
    """Return the trial index recorded on the last non-empty row of a
    controller training-history CSV.

    Parameters
    ----------
    fn : str
        Path to the train-history CSV file.

    Returns
    -------
    int
        The trial number from the first column of the last data row.

    Raises
    ------
    ValueError
        If the file contains no data rows (previously this surfaced as an
        UnboundLocalError, and blank lines raised IndexError).
    """
    trial = None
    with open(fn, 'r') as f:
        for row in csv.reader(f):
            # csv yields [] for blank lines; skip them instead of indexing.
            if row:
                trial = row[0]
    if trial is None:
        raise ValueError('no history records found in %s' % fn)
    return int(trial)
|
def compute_entropy(prob_states):
    """Total Shannon entropy (in bits) summed over a nested collection of
    probability vectors; zero-probability entries contribute nothing.
    """
    total = 0
    for prob_group in prob_states:
        for vec in prob_group:
            vec = np.array(vec).flatten()
            # Keep only strictly positive entries: 0 * log2(0) is taken as 0.
            positive = vec[vec > 0]
            total += np.sum((- positive) * np.log2(positive))
    return total
|
class ControllerTrainEnvironment():
    """The training environment employs ``controller`` model and ``manager`` to manage
    data and reward, creating a reinforcement-learning environment for
    architecture search.

    Parameters
    ----------
    controller : amber.architect.BaseController
        The controller to search architectures in this environment.
    manager : amber.architect.BaseManager
        The manager to interact with modelers in this environment.
    max_episode : int
        Maximum number of controller steps. Each controller step samples
        ``max_step_per_ep`` child model architectures.
    max_step_per_ep : int
        Number of child architectures to sample in each controller step.
    logger : logging.Logger or None
        The logger to use; a default is set up in ``working_dir`` when None.
    resume_prev_run : bool
        If True, reload existing controller weights and history from the
        working directory. Default is False.
    should_plot : bool
        If False, turn off the plots at the end of training. Default is True.
    initial_buffering_queue : int
        Number of episodes to buffer before the first controller update;
        capped at the controller buffer's ``max_size``.
    working_dir : str
        File path to working directory.
    entropy_converge_epsilon : float
        Tolerance for entropy-based convergence checks.
    squeezed_action : bool
        If False, each entry in the architecture sequence is expected to be
        strictly one-hot encoded; if True, some entries are categorically
        encoded, consistent with ``amber.architect.GeneralController``
        (recommended). Default is True.
    with_input_blocks : bool
        Whether the controller searches input blocks. Default is False.
    with_skip_connection : bool
        Whether the controller searches residual/skip connections. Default True.
    save_controller : bool
        Whether to save final controller parameters in the working directory
        after training (i.e. reaching the max controller steps). Default True.
    continuous_run : bool
        If True, do not archive prior training-stats files during `clean`.
    verbose : bool or int
        Verbosity level.

    Notes
    -----
    If either `with_input_blocks` or `with_skip_connection` is set, a list of
    integers will be used to represent the sequential model architecture and
    wiring, instead of a list of amber.architect.Operation.

    TODO
    ----
    Refactor the rest of attributes to be private.
    """

    def __init__(self, controller, manager, max_episode=100, max_step_per_ep=2, logger=None, resume_prev_run=False, should_plot=True, initial_buffering_queue=15, working_dir='.', entropy_converge_epsilon=0.01, squeezed_action=True, with_input_blocks=False, with_skip_connection=True, save_controller=True, continuous_run=False, verbose=0, **kwargs):
        self.controller = controller
        self.manager = manager
        self.max_episode = max_episode
        self.max_step_per_ep = max_step_per_ep
        self.start_ep = 0
        self.should_plot = should_plot
        self.working_dir = working_dir
        self.total_reward = 0
        self.entropy_record = []
        self.entropy_converge_epsilon = entropy_converge_epsilon
        self.squeezed_action = squeezed_action
        self.with_input_blocks = with_input_blocks
        self.with_skip_connection = with_skip_connection
        self.save_controller = save_controller
        # Buffering cannot exceed what the controller's buffer can hold.
        self.initial_buffering_queue = min(initial_buffering_queue, controller.buffer.max_size)
        self.continuous_run = continuous_run
        self.verbose = verbose
        # Size of the last layer's action state; used to shape the dummy
        # controller input in `reset`/`step`. Falls back to 1 for
        # controllers whose state_space does not support len().
        try:
            self.last_actionState_size = len(self.controller.state_space[(- 1)])
        except Exception as e:
            warnings.warn(('DEPRECATED Exception in ControllerTrainEnv: %s' % e), stacklevel=2)
            self.last_actionState_size = 1
        self.resume_prev_run = resume_prev_run
        self.logger = (logger if logger else setup_logger(working_dir))
        # Sanity-check that manager and environment agree on the working dir.
        if issubclass(type(manager), BaseNetworkManager):
            if (os.path.realpath(manager.working_dir) != os.path.realpath(self.working_dir)):
                warnings.warn('manager working dir and environment working dir are different.', stacklevel=2)
        else:
            warnings.warn('ControllerTrainEnvironment: input manager is not a subclass of BaseNetworkManager; make sure this is intended', stacklevel=2)
        if resume_prev_run:
            self.restore()
        else:
            self.clean()

    def __str__(self):
        s = ('ControllerTrainEnv for %i max steps, %i child mod. each step' % (self.max_episode, self.max_step_per_ep))
        return s

    def restore(self):
        """Reload controller weights and the last finished episode index from
        the working directory; requires `save_controller` to be enabled."""
        if self.save_controller:
            self.controller.load_weights(os.path.join(self.working_dir, 'controller_weights.h5'))
            self.start_ep = get_controller_history(os.path.join(self.working_dir, 'train_history.csv'))
        else:
            raise Exception('Did not turn on option `save_controller`')

    def clean(self):
        """Archive artifacts from a previous run into a timestamped backup
        folder and start with a fresh `weights/` directory."""
        bak_weights_dir = os.path.join(self.working_dir, ('weights_bak_%s' % datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')))
        if os.path.isdir(os.path.join(self.working_dir, 'weights')):
            shutil.move(os.path.join(self.working_dir, 'weights'), bak_weights_dir)
        movable_files = ['buffers.txt', 'log.controller.txt', 'train_history.csv', 'train_history.png', 'entropy.png', 'nas_training_stats.png', 'controller_states.npy', 'controller_weights.h5', 'controller_hidden_states.png']
        # In a continuous run the stats files are kept so they accumulate.
        if (not self.continuous_run):
            movable_files += ['nas_training_stats.json', 'weight_data.json']
        movable_files += [x for x in os.listdir(self.working_dir) if (x.startswith('weight_at_layer_') and x.endswith('.png'))]
        movable_files += [x for x in os.listdir(self.working_dir) if (x.startswith('inp_at_layer_') and x.endswith('.png'))]
        movable_files += [x for x in os.listdir(self.working_dir) if (x.startswith('skip_at_layer_') and x.endswith('.png'))]
        # NOTE(review): if no `weights/` dir existed, bak_weights_dir is never
        # created by the move above; moving single files into it then renames
        # the first file to that path — confirm this is intended.
        for file in movable_files:
            file = os.path.join(self.working_dir, file)
            if os.path.exists(file):
                shutil.move(file, bak_weights_dir)
        os.makedirs(os.path.join(self.working_dir, 'weights'))
        self.controller.remove_files(movable_files, self.working_dir)

    def reset(self):
        """Return a random softmax-normalized state of shape
        (1, 1, last_actionState_size) to seed the controller input."""
        x = np.random.uniform(0, 5, (1, 1, self.last_actionState_size))
        x = (np.exp(x) / np.sum(np.exp(x)))
        return x

    def step(self, action_prob):
        """Use the last action probability as the next state; fall back to a
        fresh random state when its shape does not fit."""
        try:
            next_state = np.array(action_prob[(- 1)]).reshape((1, 1, self.last_actionState_size))
        except ValueError:
            next_state = self.reset()
        return next_state

    def train(self):
        'Performs training for controller\n        '
        LOGGER = self.logger
        action_probs_record = []
        loss_and_metrics_list = []
        # Resume the global step counter where the previous run left off.
        global_step = (self.start_ep * self.max_step_per_ep)
        if self.resume_prev_run:
            f = open(os.path.join(self.working_dir, 'train_history.csv'), mode='a+')
        else:
            f = open(os.path.join(self.working_dir, 'train_history.csv'), mode='w')
        writer = csv.writer(f)
        for ep in range(self.start_ep, self.max_episode):
            try:
                state = self.reset()
                ep_reward = 0
                loss_and_metrics_ep = {'knowledge': 0, 'acc': 0, 'loss': 0}
                if ('metrics' in self.manager.model_compile_dict):
                    loss_and_metrics_ep.update({x: 0 for x in self.manager.model_compile_dict['metrics']})
                ep_probs = []
                for step in range(self.max_step_per_ep):
                    # Sample one architecture and track its action entropy.
                    (actions, probs) = self.controller.get_action(state)
                    self.entropy_record.append(compute_entropy(probs))
                    next_state = self.step(probs)
                    ep_probs.append(probs)
                    if self.squeezed_action:
                        action_list = parse_action_str_squeezed(actions, self.controller.state_space)
                    else:
                        action_list = parse_action_str(actions, self.controller.state_space)
                    LOGGER.debug('Predicted actions : {}'.format([str(x) for x in action_list]))
                    # With wiring search the raw integer actions are passed
                    # to the manager; otherwise the parsed operation list.
                    if (self.with_input_blocks or self.with_skip_connection):
                        (reward, loss_and_metrics) = self.manager.get_rewards(global_step, actions)
                    else:
                        (reward, loss_and_metrics) = self.manager.get_rewards(global_step, action_list)
                    LOGGER.debug(((('Rewards : ' + str(reward)) + ' Metrics : ') + str(loss_and_metrics)))
                    ep_reward += reward
                    for x in loss_and_metrics.keys():
                        loss_and_metrics_ep[x] += loss_and_metrics[x]
                    # Store the transition in the controller's replay buffer.
                    self.controller.store(state, probs, actions, reward)
                    data = [global_step, [loss_and_metrics[x] for x in sorted(loss_and_metrics.keys())], reward]
                    if self.squeezed_action:
                        data.extend(actions)
                    else:
                        data.extend(action_list)
                    writer.writerow(data)
                    f.flush()
                    global_step += 1
                    state = next_state
                loss_and_metrics_list.append({x: (v / self.max_step_per_ep) for (x, v) in loss_and_metrics_ep.items()})
                # Average action probabilities over the episode's steps.
                ep_p = [(sum(p) / len(p)) for p in zip(*ep_probs)]
                action_probs_record.append(ep_p)
                # Only update the controller once the buffer has warmed up.
                if (ep >= (self.initial_buffering_queue - 1)):
                    loss = self.controller.train(ep, self.working_dir)
                    self.total_reward += np.sum(np.array(self.controller.buffer.lt_adbuffer[(- 1)]).flatten())
                    LOGGER.debug(('Total reward : ' + str(self.total_reward)))
                    LOGGER.debug(('END episode %d: Controller loss : %0.6f' % (ep, loss)))
                    LOGGER.debug(('-' * 10))
                else:
                    LOGGER.debug(('END episode %d: Buffering' % ep))
                    LOGGER.debug(('-' * 10))
                if self.save_controller:
                    self.controller.save_weights(os.path.join(self.working_dir, 'controller_weights.h5'))
            except KeyboardInterrupt:
                LOGGER.info('User disrupted training')
                break
        LOGGER.debug(('Total Reward : %s' % self.total_reward))
        f.close()
        # NOTE(review): `loss_and_metrics` and `ep_p` below refer to the last
        # loop iteration; if no episode body ran this raises NameError — verify.
        plot_controller_performance(os.path.join(self.working_dir, 'train_history.csv'), metrics_dict={k: v for (k, v) in zip(sorted(loss_and_metrics.keys()), range(len(loss_and_metrics)))}, save_fn=os.path.join(self.working_dir, 'train_history.png'), N_sma=10)
        plot_environment_entropy(self.entropy_record, os.path.join(self.working_dir, 'entropy.png'))
        save_kwargs = {}
        if self.with_input_blocks:
            save_kwargs['input_nodes'] = self.manager.model_fn.inputs_op
        save_action_weights(action_probs_record, self.controller.state_space, self.working_dir, with_input_blocks=self.with_input_blocks, with_skip_connection=self.with_skip_connection, **save_kwargs)
        save_stats(loss_and_metrics_list, self.working_dir)
        if self.should_plot:
            plot_action_weights(self.working_dir)
            plot_wiring_weights(self.working_dir, self.with_input_blocks, self.with_skip_connection)
            plot_stats2(self.working_dir)
        # Return the argmax action indices from the final episode's
        # averaged probabilities.
        act_idx = []
        for p in ep_p:
            act_idx.append(np.argmax(p))
        return act_idx
|
class EnasTrainEnv(ControllerTrainEnvironment):
    """Training environment for ENAS with parameter sharing.

    Extra keyword parameters (popped before the parent __init__):

    Parameters
    ----------
    time_budget : str
        Wall-clock budget in 'HH:mm:ss' format; defaults to '72:00:00'
        (72 hours). Converted to seconds in __init__.
    child_train_steps : int or None
        Number of steps to train the child model per sampling call.
    child_warm_up_epochs : int
        Number of warm-up epochs for the shared child model before the
        controller search starts.
    save_controller_every : int or None
        If set, save controller weights every this many episodes.
    """

    def __init__(self, *args, **kwargs):
        self.time_budget = kwargs.pop('time_budget', '72:00:00')
        self.child_train_steps = kwargs.pop('child_train_steps', None)
        self.child_warm_up_epochs = kwargs.pop('child_warm_up_epochs', 0)
        self.save_controller_every = kwargs.pop('save_controller_every', None)
        super().__init__(*args, **kwargs)
        # ENAS updates the controller from the first episode; no buffering.
        self.initial_buffering_queue = 0
        # Wire the shared child-model function to this controller if needed.
        if issubclass(type(self.manager), BaseNetworkManager):
            if (self.manager.model_fn.controller is None):
                self.manager.model_fn.set_controller(self.controller)
        else:
            warnings.warn('EnasTrainEnv: input manager is not a subclass of BaseNetworkManager; make sure this is intended', stacklevel=2)
        # Convert 'HH:mm:ss' into total seconds.
        # NOTE(review): a None time_budget is accepted here but the train loop
        # compares consumed_time >= self.time_budget, which would fail on
        # None — confirm None is never actually used.
        if (self.time_budget is None):
            pass
        elif (type(self.time_budget) is str):
            print(('time budget set to: %s' % self.time_budget))
            self.time_budget = sum(((x * int(t)) for (x, t) in zip([3600, 60, 1], self.time_budget.split(':'))))
        else:
            raise Exception(('time budget should be in format HH:mm:ss; cannot understand : %s' % self.time_budget))
        self.action_probs_record = None

    def train(self):
        """Run the ENAS search loop: alternate child-model sampling/training
        with controller updates, within the wall-clock time budget."""
        LOGGER = self.logger
        # 10 = DEBUG, 40 = ERROR.
        if self.verbose:
            LOGGER.setLevel(10)
        else:
            LOGGER.setLevel(40)
        action_probs_record = []
        loss_and_metrics_list = []
        state = self.reset()
        controller_step = (self.start_ep * self.max_step_per_ep)
        if self.resume_prev_run:
            f = open(os.path.join(self.working_dir, 'train_history.csv'), mode='a+')
        else:
            f = open(os.path.join(self.working_dir, 'train_history.csv'), mode='w')
        writer = csv.writer(f)
        starttime = datetime.datetime.now()
        # Optional warm-up of the shared child model (negative trial ids).
        if (self.child_warm_up_epochs > 0):
            LOGGER.info(('warm-up for child model: %i epochs' % self.child_warm_up_epochs))
            warmup_nsteps = None
            for i in range(1, (self.child_warm_up_epochs + 1)):
                LOGGER.info(('warm-up : %i epoch' % i))
                self.manager.get_rewards(trial=(- i), model_arc=None, nsteps=warmup_nsteps)
        for child_step in range(self.start_ep, self.max_episode):
            try:
                ep_reward = 0
                loss_and_metrics_ep = {'knowledge': 0, 'acc': 0, 'loss': 0}
                if ('metrics' in self.manager.model_compile_dict):
                    loss_and_metrics_ep.update({x: 0 for x in self.manager.model_compile_dict['metrics']})
                ep_probs = []
                # model_arc=None: train the shared child model on sampled arcs.
                self.manager.get_rewards(child_step, None, nsteps=self.child_train_steps)
                for step in range(self.max_step_per_ep):
                    (arc_seq, probs) = self.controller.get_action()
                    self.entropy_record.append(compute_entropy(probs))
                    ep_probs.append(probs)
                    action_list = parse_action_str_squeezed(arc_seq, self.controller.state_space)
                    LOGGER.debug('Predicted actions : {}'.format([str(x) for x in action_list]))
                    # Evaluate the sampled architecture with shared weights.
                    (reward, loss_and_metrics) = self.manager.get_rewards(controller_step, arc_seq, nsteps=self.child_train_steps)
                    LOGGER.debug(((('Rewards : ' + str(reward)) + ' Metrics : ') + str(loss_and_metrics)))
                    ep_reward += reward
                    for x in loss_and_metrics.keys():
                        loss_and_metrics_ep[x] += loss_and_metrics[x]
                    self.controller.store(state=state, prob=probs, action=arc_seq, reward=reward)
                    data = [controller_step, [loss_and_metrics[x] for x in sorted(loss_and_metrics.keys())], reward]
                    if self.squeezed_action:
                        data.extend(arc_seq)
                    else:
                        data.extend(action_list)
                    writer.writerow(data)
                    f.flush()
                    controller_step += 1
                loss_and_metrics_list.append({x: (v / self.max_step_per_ep) for (x, v) in loss_and_metrics_ep.items()})
                # Average the action probabilities across this episode.
                ep_p = [(sum(p) / len(p)) for p in zip(*ep_probs)]
                action_probs_record.append(ep_p)
                if (child_step >= (self.initial_buffering_queue - 1)):
                    loss = self.controller.train(child_step, self.working_dir)
                    self.total_reward += np.sum(np.array(self.controller.buffer.lt_adbuffer[(- 1)]).flatten())
                    LOGGER.info(('Total reward : ' + str(self.total_reward)))
                    LOGGER.info(('END episode %d: Controller loss : %0.6f' % (child_step, loss)))
                    LOGGER.info(('-' * 10))
                else:
                    LOGGER.info(('END episode %d: Buffering' % child_step))
                    LOGGER.info(('-' * 10))
                # Periodic checkpointing of controller weights.
                if ((self.save_controller_every is not None) and ((child_step % self.save_controller_every) == 0)):
                    self.logger.info(('Saving controller weights for epoch %d' % child_step))
                    self.controller.save_weights(os.path.join(self.working_dir, ('controller_weights-epoch-%i.h5' % child_step)))
            except KeyboardInterrupt:
                LOGGER.info('User disrupted training')
                break
            # Stop when the wall-clock budget is exhausted.
            consumed_time = (datetime.datetime.now() - starttime).total_seconds()
            LOGGER.info(('used time: %.2f %%' % ((consumed_time / self.time_budget) * 100)))
            if (consumed_time >= self.time_budget):
                LOGGER.info('training ceased because run out of time budget')
                break
        LOGGER.debug(('Total Reward : %s' % self.total_reward))
        if self.save_controller:
            self.controller.save_weights(os.path.join(self.working_dir, 'controller_weights.h5'))
        f.close()
        plot_controller_performance(os.path.join(self.working_dir, 'train_history.csv'), metrics_dict={k: v for (k, v) in zip(sorted(loss_and_metrics.keys()), range(len(loss_and_metrics)))}, save_fn=os.path.join(self.working_dir, 'train_history.png'), N_sma=10)
        plot_environment_entropy(self.entropy_record, os.path.join(self.working_dir, 'entropy.png'))
        save_kwargs = {}
        if self.with_input_blocks:
            save_kwargs['input_nodes'] = self.manager.model_fn.inputs_op
        self.action_probs_record = action_probs_record
        save_action_weights(action_probs_record, self.controller.state_space, self.working_dir, with_input_blocks=self.with_input_blocks, with_skip_connection=self.with_skip_connection, **save_kwargs)
        # NOTE(review): this overwrites action_probs_record (set two lines
        # above) with the metrics list — looks like a copy-paste bug; verify.
        self.action_probs_record = loss_and_metrics_list
        save_stats(loss_and_metrics_list, self.working_dir)
        if self.should_plot:
            plot_action_weights(self.working_dir)
            plot_wiring_weights(self.working_dir, self.with_input_blocks, self.with_skip_connection)
            plot_stats2(self.working_dir)
        # Return argmax action indices from the final episode's averages.
        act_idx = []
        for p in ep_p:
            act_idx.append(np.argmax(p))
        return act_idx
|
class MultiManagerEnvironment(EnasTrainEnv):
    """An environment that allows one controller to interact with multiple
    EnasManagers, one per dataset/task.

    Parameters
    ----------
    data_descriptive_features
        Indexable collection of per-manager descriptive feature vectors;
        must have the same length as the manager list. Passed to the
        controller as ``description_feature`` when sampling.
    is_enas : bool or 'auto'
        Whether the managers use ENAS parameter sharing. With 'auto', it is
        True iff every manager is an EnasManager instance.
    """

    def __init__(self, data_descriptive_features, is_enas='auto', *args, **kwargs):
        super(MultiManagerEnvironment, self).__init__(*args, **kwargs)
        assert (type(self.manager) is list), ('MultiManagerEnasEnvironment must have a List of manager instances, got %s' % type(self.manager))
        self.manager_cnt = len(self.manager)
        self.is_enas = is_enas
        for i in range(self.manager_cnt):
            assert issubclass(type(self.manager[i]), BaseNetworkManager), ('MultiManagerEnasEnvironment expects a List of Manager instances, got %s for %i-th element' % (type(self.manager[i]), i))
        self.data_descriptive_features = data_descriptive_features
        assert (len(self.data_descriptive_features) == self.manager_cnt), ('data descriptive features must match the number of managers; got %i description, %i managers' % (len(self.data_descriptive_features), self.manager_cnt))
        # 'auto': enable ENAS mode only when every manager supports it.
        if (self.is_enas == 'auto'):
            if all([isinstance(self.manager[i], EnasManager) for i in range(self.manager_cnt)]):
                self.is_enas = True
            else:
                self.is_enas = False

    def _warmup(self):
        """Warm up every manager's shared child model for
        `child_warm_up_epochs` epochs (negative trial ids)."""
        assert self.is_enas, 'You can only set warm_up_epochs>0 if is_enas=True'
        self.logger.info(('warm-up for child model: %i epochs' % self.child_warm_up_epochs))
        warmup_nsteps = None
        for i in range(1, (self.child_warm_up_epochs + 1)):
            self.logger.info(('warm-up : %i epoch' % i))
            for j in range(self.manager_cnt):
                self.manager[j].get_rewards(trial=(- i), model_arc=None, nsteps=warmup_nsteps)

    def _train_loop(self):
        """Core loop: for each episode, sample architectures per manager,
        collect rewards, store transitions, and update the controller.

        Returns
        -------
        tuple
            (action_probs_record, loss_and_metrics_list) accumulated over
            all episodes.
        """
        if self.resume_prev_run:
            f = open(os.path.join(self.working_dir, 'train_history.csv'), mode='a+')
        else:
            f = open(os.path.join(self.working_dir, 'train_history.csv'), mode='w')
        writer = csv.writer(f)
        starttime = datetime.datetime.now()
        action_probs_record = []
        loss_and_metrics_list = []
        controller_step = (self.start_ep * self.max_step_per_ep)
        for child_step in range(self.start_ep, self.max_episode):
            try:
                # ENAS: first train each manager's shared child model.
                if self.is_enas:
                    for j in range(self.manager_cnt):
                        self.logger.info(('sampling with manager %i' % j))
                        self.manager[j].get_rewards(child_step, None, nsteps=self.child_train_steps)
                ep_reward = 0
                loss_and_metrics_ep = defaultdict(float)
                for step in range(self.max_step_per_ep):
                    for j in range(self.manager_cnt):
                        # NOTE(review): ep_probs is re-initialized inside the
                        # manager loop, so only the last manager's single
                        # sample survives for averaging below — verify intent.
                        ep_probs = []
                        # Condition sampling on this manager's descriptor.
                        (arc_seq, probs) = self.controller.get_action(description_feature=self.data_descriptive_features[[j]])
                        self.entropy_record.append(compute_entropy(probs))
                        ep_probs.append(probs)
                        action_list = parse_action_str_squeezed(arc_seq, self.controller.state_space)
                        self.logger.debug('Manager {}, Predicted actions : {}'.format(j, [str(x) for x in action_list]))
                        (reward, loss_and_metrics) = self.manager[j].get_rewards(trial=controller_step, model_arc=arc_seq, nsteps=self.child_train_steps)
                        self.logger.debug(((('Rewards : ' + str(reward)) + ' Metrics : ') + str(loss_and_metrics)))
                        ep_reward += reward
                        for x in loss_and_metrics.keys():
                            loss_and_metrics_ep[x] += loss_and_metrics[x]
                        self.controller.store(prob=probs, action=arc_seq, reward=reward, description=self.data_descriptive_features[[j]], manager_index=j)
                        # History rows are keyed as '<manager>-<step>'.
                        data = [('%i-%i' % (j, controller_step)), [loss_and_metrics[x] for x in sorted(loss_and_metrics.keys())], reward]
                        if self.squeezed_action:
                            data.extend(arc_seq)
                        else:
                            data.extend(action_list)
                        writer.writerow(data)
                        f.flush()
                    controller_step += 1
                loss_and_metrics_list.append({x: (v / self.max_step_per_ep) for (x, v) in loss_and_metrics_ep.items()})
                ep_p = [(sum(p) / len(p)) for p in zip(*ep_probs)]
                action_probs_record.append(ep_p)
                if (child_step >= (self.initial_buffering_queue - 1)):
                    loss = self.controller.train(child_step, self.working_dir)
                    self.total_reward += np.sum(np.array(self.controller.buffer.lt_adv[(- 1)]).flatten())
                    self.logger.info(('Total reward : ' + str(self.total_reward)))
                    self.logger.info(('END episode %d: Controller loss : %0.6f' % (child_step, loss)))
                    self.logger.info(('-' * 10))
                else:
                    self.logger.info(('END episode %d: Buffering' % child_step))
                    self.logger.info(('-' * 10))
                # Periodic checkpointing of controller weights.
                if ((self.save_controller_every is not None) and ((child_step % self.save_controller_every) == 0)):
                    self.logger.info(('Saving controller weights for epoch %d' % child_step))
                    self.controller.save_weights(os.path.join(self.working_dir, ('controller_weights-epoch-%i.h5' % child_step)))
            except KeyboardInterrupt:
                self.logger.info('User disrupted training')
                break
            # Stop when the wall-clock budget is exhausted.
            consumed_time = (datetime.datetime.now() - starttime).total_seconds()
            self.logger.info(('used time: %.2f %%' % ((consumed_time / self.time_budget) * 100)))
            if (consumed_time >= self.time_budget):
                self.logger.info('training ceased because run out of time budget')
                break
        self.logger.debug(('Total Reward : %s' % self.total_reward))
        f.close()
        return (action_probs_record, loss_and_metrics_list)

    def train(self):
        """Optionally warm up, run `_train_loop`, then write summary plots and
        stats; returns argmax action indices of the last recorded episode."""
        if (self.child_warm_up_epochs > 0):
            self._warmup()
        (action_probs_record, loss_and_metrics_list) = self._train_loop()
        metrics_dict = {k: v for (k, v) in zip(sorted(loss_and_metrics_list[0].keys()), range(len(loss_and_metrics_list[0])))}
        plot_controller_performance(os.path.join(self.working_dir, 'train_history.csv'), metrics_dict=metrics_dict, save_fn=os.path.join(self.working_dir, 'train_history.png'), N_sma=5)
        plot_environment_entropy(self.entropy_record, os.path.join(self.working_dir, 'entropy.png'))
        save_kwargs = {}
        # NOTE(review): self.manager is a list here, so
        # self.manager.model_fn would raise AttributeError when
        # with_input_blocks is True — verify this path is unused.
        if self.with_input_blocks:
            save_kwargs['input_nodes'] = self.manager.model_fn.inputs_op
        self.action_probs_record = action_probs_record
        save_action_weights(action_probs_record, self.controller.state_space, self.working_dir, with_input_blocks=self.with_input_blocks, with_skip_connection=self.with_skip_connection, **save_kwargs)
        save_stats(loss_and_metrics_list, self.working_dir)
        if self.should_plot:
            plot_action_weights(self.working_dir)
            plot_wiring_weights(self.working_dir, self.with_input_blocks, self.with_skip_connection)
            plot_stats2(self.working_dir)
        act_idx = []
        for p in action_probs_record[(- 1)]:
            act_idx.append(np.argmax(p))
        return act_idx
|
class ParallelMultiManagerEnvironment(MultiManagerEnvironment):
def __init__(self, processes=2, enable_manager_sampling=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.processes = processes
self.enable_manager_sampling = enable_manager_sampling
self._gpus = get_available_gpus()
assert (self.processes >= 1)
if (self.enable_manager_sampling is False):
assert (self.processes <= self.manager_cnt), 'Cannot have more processes than managers without sampling'
@staticmethod
def _reward_getter(args):
pid = os.getpid()
res = []
for i in range(len(args)):
try:
if ('remap_device' in args[i]):
devices = args[i]['remap_device']
elif hasattr(args[i]['manager'], 'devices'):
devices = args[i]['manager'].devices
args[i]['remap_device'] = None
else:
devices = 'NoAttribute'
args[i]['remap_device'] = None
sys.stderr.write(('PID %i: %i/%i run; devices=%s\n' % (pid, i, len(args), devices)))
(reward, loss_and_metrics) = args[i]['manager'].get_rewards(trial=args[i]['trial'], model_arc=args[i]['model_arc'], nsteps=args[i]['nsteps'], remap_device=args[i]['remap_device'])
except Exception as e:
raise Exception(('child pid %i when processing %s, has exception %s' % (pid, args[i]['model_arc'], e)))
res.append({'reward': reward, 'loss_and_metrics': loss_and_metrics})
for i in range(len(args)):
if hasattr(args[i]['manager'], 'close_handler'):
args[i]['manager'].close_handler()
return res
def _train_loop(self):
if self.resume_prev_run:
f = open(os.path.join(self.working_dir, 'train_history.csv'), mode='a+')
else:
f = open(os.path.join(self.working_dir, 'train_history.csv'), mode='w')
writer = csv.writer(f)
starttime = datetime.datetime.now()
action_probs_record = []
loss_and_metrics_list = []
controller_step = (self.start_ep * self.max_step_per_ep)
from multiprocessing import set_start_method, get_context
set_start_method('spawn')
for child_step in range(self.start_ep, self.max_episode):
try:
if self.is_enas:
pool_args = []
for j in range(self.manager_cnt):
pool_args.append([{'manager': self.manager[j], 'trial': child_step, 'model_arc': None, 'nsteps': self.child_train_steps}])
self.logger.info(('sampling of n=%i managers executed in parallel..' % self.manager_cnt))
_ = pool.map(self._reward_getter, pool_args)
ep_reward = 0
loss_and_metrics_ep = defaultdict(float)
pool_args = []
store_args = []
if (self.enable_manager_sampling == True):
replace = (True if (len(self._gpus) > self.manager_cnt) else False)
this_manager_index = np.random.choice(self.manager_cnt, len(self._gpus), replace=replace)
else:
this_manager_index = np.arange(self.manager_cnt)
for (k, j) in enumerate(this_manager_index):
this_pool = []
this_store = []
controller_step_ = controller_step
for step in range(self.max_step_per_ep):
ep_probs = []
(arc_seq, probs) = self.controller.get_action(description_feature=self.data_descriptive_features[[j]])
self.entropy_record.append(compute_entropy(probs))
ep_probs.append(probs)
action_list = parse_action_str_squeezed(arc_seq, self.controller.state_space)
self.logger.debug('Manager {}, Predicted actions : {}'.format(j, [str(x) for x in action_list]))
this_pool.append({'manager': self.manager[j], 'trial': controller_step_, 'model_arc': arc_seq, 'nsteps': self.child_train_steps})
if self.enable_manager_sampling:
this_pool[(- 1)].update({'remap_device': self._gpus[k]})
this_store.append({'prob': probs, 'action': arc_seq, 'description': self.data_descriptive_features[[j]]})
controller_step_ += 1
pool_args.append(this_pool)
store_args.append(this_store)
controller_step += self.max_step_per_ep
a_time = time.time()
with get_context('spawn').Pool(processes=self.processes) as pool:
elp = (time.time() - a_time)
if (self.processes > 1):
self.logger.info(('distributing %i/%i reward-getters to a pool of %i workers.. %.3f sec' % (len(pool_args), self.manager_cnt, self.processes, elp)))
res_list = pool.map(self._reward_getter, pool_args)
else:
self.logger.info(('Only %i worker, running sequentially.. %.3f' % (self.processes, elp)))
res_list = []
for x in pool_args:
res_list.append(self._reward_getter(x))
assert (len(res_list) == len(this_manager_index))
self.logger.info('storing..')
for (m_, (store_, res_)) in enumerate(zip(store_args, res_list)):
m = this_manager_index[m_]
for (t, (store, res)) in enumerate(zip(store_, res_)):
(reward, loss_and_metrics) = (res['reward'], res['loss_and_metrics'])
(probs, arc_seq, description) = (store['prob'], store['action'], store['description'])
ep_reward += reward
trial = pool_args[m_][t]['trial']
for x in loss_and_metrics.keys():
loss_and_metrics_ep[x] += loss_and_metrics[x]
self.controller.store(prob=probs, action=arc_seq, reward=reward, description=self.data_descriptive_features[[m]], manager_index=m)
data = [('%i-%i' % (m, trial)), [loss_and_metrics[x] for x in sorted(loss_and_metrics.keys())], reward]
if self.squeezed_action:
data.extend(arc_seq)
else:
data.extend(action_list)
writer.writerow(data)
f.flush()
loss_and_metrics_list.append({x: (v / self.max_step_per_ep) for (x, v) in loss_and_metrics_ep.items()})
ep_p = [(sum(p) / len(p)) for p in zip(*ep_probs)]
action_probs_record.append(ep_p)
if (child_step >= (self.initial_buffering_queue - 1)):
loss = self.controller.train(child_step, self.working_dir)
self.total_reward += np.sum(np.array(self.controller.buffer.lt_adv[(- 1)]).flatten())
self.logger.info(('Total reward : ' + str(self.total_reward)))
self.logger.info(('END episode %d: Controller loss : %0.6f' % (child_step, loss)))
self.logger.info(('-' * 10))
else:
self.logger.info(('END episode %d: Buffering' % child_step))
self.logger.info(('-' * 10))
if ((self.save_controller_every is not None) and ((child_step % self.save_controller_every) == 0)):
self.logger.info(('Saving controller weights for epoch %d' % child_step))
self.controller.save_weights(os.path.join(self.working_dir, ('controller_weights-epoch-%i.h5' % child_step)))
except KeyboardInterrupt:
self.logger.info('User disrupted training')
break
consumed_time = (datetime.datetime.now() - starttime).total_seconds()
self.logger.info(('used time: %.2f %%' % ((consumed_time / self.time_budget) * 100)))
if (consumed_time >= self.time_budget):
self.logger.info('training ceased because run out of time budget')
break
self.logger.debug(('Total Reward : %s' % self.total_reward))
f.close()
return (action_probs_record, loss_and_metrics_list)
def restore(self):
    """Reload saved controller weights and fast-forward `start_ep` past the
    episodes already recorded in the train-history CSV."""
    self.controller.load_weights(os.path.join(self.working_dir, 'controller_weights.h5'))
    self.logger.info('Loaded existing weights')
    with open(os.path.join(self.working_dir, 'train_history.csv'), 'r') as f:
        finished_records = sum(1 for _ in f)
    # each episode writes len(manager) * max_step_per_ep rows to the CSV
    self.start_ep = finished_records // (len(self.manager) * self.max_step_per_ep)
    self.logger.info('Loaded existing history; starting from ep %i' % self.start_ep)
|
def get_model_space(num_layers):
    """Build a ModelSpace of `num_layers` layers, each choosing between a
    relu- and a tanh-activated 5-unit Dense layer."""
    space = ModelSpace()
    for layer_id in range(num_layers):
        candidates = [State('Dense', units=5, activation='relu'),
                      State('Dense', units=5, activation='tanh')]
        space.add_layer(layer_id, candidates)
    return space
|
def get_input_nodes(num_inputs, with_input_blocks):
    """Return the list of input State nodes.

    With `with_input_blocks` set, each feature becomes its own single-unit
    node named `X0..X{n-1}`; otherwise a single flat input of `num_inputs`
    units named 'Input' is returned.
    """
    if not with_input_blocks:
        return [State('input', shape=(num_inputs,), name='Input')]
    return [State('input', shape=(1,), name='X%i' % node) for node in range(num_inputs)]
|
def get_output_nodes():
    """Return the single linear-activation regression output node."""
    return State('Dense', units=1, activation='linear', name='output')
|
def get_data(with_input_blocks):
    """Simulate a 4-feature regression dataset with two pairwise interaction
    effects (+3 and -2) and no additive effects.

    Returns ((X_train, y_train), (X_val, y_val), (X_test, y_test)).
    """
    np.random.seed(111)  # reproducible draws
    num_samples, num_features = 5000, 4
    additive_effects = np.array([0, 0, 0, 0]).astype('float32')
    interaction_effects = np.array([0, 3, 0, 0] + [0] * 3 + [0, -2] + [0]).astype('float32')
    simulator = HigherOrderSimulator(n=num_samples, p=num_features, noise_var=0.1, x_var=1.0,
                                     degree=2, discretize_beta=True, discretize_x=False,
                                     with_input_blocks=with_input_blocks)
    simulator.set_effect(additive_effects, interaction_effects)
    train = simulator.sample_data()
    val = simulator.sample_data(N=500)
    test = simulator.sample_data(N=500)
    return train, val, test
|
def get_data_correlated(with_input_blocks, corr_coef=0.6):
    """Like `get_data`, but features 0 and 2 are correlated with coefficient
    `corr_coef` via CorrelatedDataSimulator."""
    np.random.seed(111)  # reproducible draws
    num_samples, num_features = 5000, 4
    additive_effects = np.array([0, 0, 0, 0]).astype('float32')
    interaction_effects = np.array([0, 3, 0, 0] + [0] * 3 + [0, -2] + [0]).astype('float32')
    cov_mat = np.eye(4) * 1.0
    cov_mat[0, 2] = cov_mat[2, 0] = 1.0 * corr_coef
    simulator = CorrelatedDataSimulator(n=num_samples, p=num_features, noise_var=0.1,
                                        data_cov_matrix=cov_mat, degree=2,
                                        discretize_beta=True, with_input_blocks=with_input_blocks)
    simulator.set_effect(additive_effects, interaction_effects)
    train = simulator.sample_data()
    val = simulator.sample_data(N=500)
    test = simulator.sample_data(N=500)
    return train, val, test
|
def get_knowledge_fn():
    """Encode the ground-truth interaction graph (features 0-1: +3.0,
    features 2-3: -2.0) into a GraphKnowledgeHessFunc and return it."""
    gkf = GraphKnowledgeHessFunc(total_feature_num=4)
    adj = np.zeros((4, 4))
    adj[0, 1] = adj[1, 0] = 3.0
    adj[2, 3] = adj[3, 2] = -2.0
    intr_idx, intr_eff = gkf.convert_adjacency_to_knowledge(adj)
    gkf.knowledge_encoder(intr_idx, intr_eff)
    return gkf
|
def get_reward_fn(gkf, Lambda=1.0):
    """Wrap knowledge function `gkf` in a KnowledgeReward weighted by `Lambda`."""
    return KnowledgeReward(gkf, Lambda=Lambda)
|
def get_manager(train_data, validation_data, model_fn, reward_fn, wd='./tmp'):
    """Construct a GeneralManager with the fixed mse/adam/mae compile spec
    used throughout this bootstrap module."""
    compile_spec = {'loss': 'mse', 'optimizer': 'adam', 'metrics': ['mae']}
    return GeneralManager(train_data, validation_data, working_dir=wd, model_fn=model_fn,
                          reward_fn=reward_fn, post_processing_fn=store_with_hessian,
                          model_compile_dict=compile_spec, epochs=100, verbose=0,
                          child_batchsize=100)
|
def get_model_fn(model_space, inputs_op, output_op, num_layers, with_skip_connection, with_input_blocks):
    """Build a DAGModelBuilder using the InputBlockDAG construction and the
    module's standard mse/adam/mae compile spec."""
    compile_spec = {'loss': 'mse', 'optimizer': 'adam', 'metrics': ['mae']}
    return DAGModelBuilder(inputs_op, output_op, num_layers, model_space, compile_spec,
                           with_skip_connection, with_input_blocks, dag_func='InputBlockDAG')
|
def ID2arch(hist_df, state_str_to_state_shortname):
    """Map each architecture ID in `hist_df` to a tuple of layer short-names.

    Layer columns are assumed to be named 'L1'..'Ln'; the lookup table maps
    layer index -> {state-string -> short-name}.
    """
    layer_cols = [c for c in hist_df.columns.values if c.startswith('L')]
    n_layers = len(layer_cols)
    id2arch = {}
    for arch_id in hist_df.ID:
        row = hist_df.loc[hist_df.ID == arch_id]
        arch = tuple(
            state_str_to_state_shortname[layer][row['L%i' % (layer + 1)].iloc[0]]
            for layer in range(n_layers)
        )
        id2arch[arch_id] = arch
    return id2arch
|
def get_gold_standard(history_fn_list, state_space, metric_name_dict={'acc': 0, 'knowledge': 1, 'loss': 2}, id_remainder=None):
    """Aggregate several train-history files into per-architecture medians.

    Parameters
    ----------
    history_fn_list : list of history CSV paths passed to `read_history`.
    state_space : model space used to translate state strings to short names.
    metric_name_dict : column-index mapping for `read_history`.
    id_remainder : if given, IDs from repeated runs are folded back onto a
        single 1..id_remainder cycle.

    Returns
    -------
    (gold_standard_df, arch2id) where the dataframe carries `loss_rank` and
    `knowledge_rank` columns and arch2id maps short-name tuples to IDs.
    """
    state_str_to_state_shortname = {
        i: {str(x): get_layer_shortname(x) for x in state_space[i]}
        for i in range(len(state_space))
    }
    df = read_history(history_fn_list, metric_name_dict=metric_name_dict)
    if id_remainder is not None:
        df.ID = df.ID % id_remainder
        # BUG FIX: `.at` only accepts scalar labels; assigning through a
        # boolean mask requires `.loc`.
        df.loc[df.ID == 0, 'ID'] = id_remainder
    id2arch = ID2arch(df, state_str_to_state_shortname)
    arch2id = {v: k for k, v in id2arch.items()}
    gs = df.groupby(by='ID', as_index=False).agg(np.median)
    gs['loss_rank'] = ss.rankdata(gs.loss)
    gs['knowledge_rank'] = ss.rankdata(gs.knowledge)
    return gs, arch2id
|
def get_gold_standard_arc_seq(history_fn_list, model_space, metric_name_dict, with_skip_connection, with_input_blocks, num_input_blocks):
    """Median-aggregate history records and map comma-joined architecture
    token strings to their enumeration index in the model space."""
    model_gen = get_model_space_generator(model_space,
                                          with_skip_connection=with_skip_connection,
                                          with_input_blocks=with_input_blocks,
                                          num_input_blocks=num_input_blocks)
    df = read_history(history_fn_list, metric_name_dict)
    gs = df.groupby(by='ID', as_index=False).agg(np.median)
    gs['loss_rank'] = ss.rankdata(gs.loss)
    gs['knowledge_rank'] = ss.rankdata(gs.knowledge)
    archs = list(model_gen)
    arch2id = {','.join(str(tok) for tok in archs[i]): i for i in range(len(archs))}
    return gs, arch2id
|
def get_model_space_generator(model_space, with_skip_connection, with_input_blocks, num_input_blocks=1):
    """Yield every architecture encoding in `model_space`.

    Each layer contributes the index of its chosen operation; with skip
    connections, one extra binary token per previous layer; with input
    blocks, the one-hot block assignments from `combine_input_blocks` are
    spliced in after each layer's tokens (yielded as numpy arrays).

    Without input blocks, plain tuples from `itertools.product` are yielded.
    """
    # Encode each layer as the indices of its candidate operations.
    new_space = []
    num_layers = len(model_space)
    for layer_id in range(num_layers):
        new_space.append(list(range(len(model_space[layer_id]))))
        if with_skip_connection:
            # one binary skip-connection token per previous layer
            for _ in range(layer_id):
                new_space.append([0, 1])
    if with_input_blocks:
        assert num_input_blocks > 1, 'if `with_input_blocks=True`, expect `num_input_blocks>1`'
        ib_arr = combine_input_blocks(num_layers, num_input_blocks)
        # positions at which each layer's input-block tokens are spliced in
        insert_pos = np.zeros(num_layers, dtype='int32')
        insert_pos[0] = p = 1
        for i in range(1, num_layers):
            p += 1 + (i - 1) * with_skip_connection
            insert_pos[i] = p
        for n in itertools.product(*new_space):
            for ib in ib_arr:
                tmp = []
                for layer_op, layer_ib in zip(np.split(n, insert_pos), list(ib) + [np.array([])]):
                    tmp.extend(layer_op)
                    tmp.extend(layer_ib)
                yield np.array(tmp)
    else:
        # BUG FIX: this function is a generator (it contains `yield`), so the
        # original `return itertools.product(*new_space)` made it yield
        # NOTHING in this branch; `yield from` actually emits the encodings.
        yield from itertools.product(*new_space)
|
def combine_input_blocks(num_layers, num_input_blocks):
    """Return all one-hot assignments of input blocks to layers, used when
    `input_block_unique_connection=True`.

    Returns an int32 array of shape
    (num_layers**num_input_blocks, num_layers, num_input_blocks), where entry
    [i, l, b] == 1 iff combination i sends block b to layer l.
    """
    assignments = list(itertools.product(*[range(num_layers)] * num_input_blocks))
    cmb_arr = np.zeros((num_layers ** num_input_blocks, num_layers, num_input_blocks), dtype='int32')
    for row, layer_choice in enumerate(assignments):
        for block, layer in enumerate(layer_choice):
            cmb_arr[row, layer, block] = 1
    return cmb_arr
|
def train_hist_csv_writter(writer, trial, loss_and_metrics, reward, model_states):
    """Append one `[trial, metrics-list, reward, *states]` row to the history CSV.

    Note the metric values (sorted by metric name) are written as a single
    list-valued cell, matching the format produced by the training loop.
    """
    metric_values = [loss_and_metrics[key] for key in sorted(loss_and_metrics)]
    state_strs = [str(s) for s in model_states]
    writer.writerow([trial, metric_values, reward] + state_strs)
    print(state_strs)
|
def rewrite_train_hist(working_dir, model_fn, knowledge_fn, data, suffix='new', metric_name_dict={'acc': 0, 'knowledge': 1, 'loss': 2}):
    """Recompute the knowledge metric for every recorded trial and write an
    updated `train_history-{suffix}.csv` next to the original.

    Each trial's best weights are reloaded into a fresh TF1 graph/session so
    models do not collide, then `knowledge_fn(model, data)` replaces the old
    knowledge value and reward = loss + new knowledge.
    """
    import tensorflow as tf
    from ..utils.io import read_history
    old_df = read_history([os.path.join(working_dir, 'train_history.csv')], metric_name_dict)
    new_fh = open(os.path.join(working_dir, 'train_history-%s.csv' % suffix), 'w')
    csv_writter = csv.writer(new_fh)
    # highest layer index present in the history columns, plus one
    layer_indices = [int(c.lstrip('L')) for c in old_df.columns.values if c.lstrip('L').isdigit()]
    total_layers = max(layer_indices) + 1
    for row in range(old_df.shape[0]):
        trial_id = old_df['ID'][row]
        param_fp = os.path.join(old_df['dir'][row], 'weights', 'trial_%i' % trial_id, 'bestmodel.h5')
        arc = np.array([old_df['L%i' % l][row] for l in range(1, total_layers)], dtype=np.int32)
        train_graph = tf.Graph()
        train_sess = tf.Session(graph=train_graph)
        with train_graph.as_default(), train_sess.as_default():
            model = model_fn(arc)
            model.load_weights(param_fp)
            new_k = knowledge_fn(model, data)
            loss_and_metrics = {m: old_df[m][row] for m in metric_name_dict}
            loss_and_metrics['knowledge'] = new_k
            reward = old_df['loss'][row] + new_k
            train_hist_csv_writter(csv_writter, trial_id, loss_and_metrics, reward, arc)
            new_fh.flush()
    new_fh.close()
    return
|
def grid_search(model_space_generator, manager, working_dir, B=10, resume_prev_run=True):
    """Exhaustively evaluate every architecture in `model_space_generator`
    for `B` passes, appending results to `working_dir/train_history.csv`.

    When `resume_prev_run` is set, the history file is opened in append mode
    and trials whose `bestmodel.h5` already exists are skipped.
    """
    write_mode = 'a' if resume_prev_run else 'w'
    trial = 0
    # BUG FIX: use a context manager so the history file is closed even when
    # a trial raises (the original leaked the handle on error).
    with open(os.path.join(working_dir, 'train_history.csv'), write_mode) as fh:
        writer = csv.writer(fh)
        for b in range(B):
            # NOTE(review): if `model_space_generator` is a generator it is
            # exhausted after the first pass, so passes b>0 iterate nothing —
            # TODO confirm this is intended for B>1.
            if getattr(model_space_generator, '__next__', None):
                space_iter = model_space_generator
            else:
                space_iter = itertools.product(*model_space_generator)
            for model_states in space_iter:
                trial += 1
                print('B={} i={} arc={}'.format(b, trial, ','.join(str(x) for x in model_states)))
                trial_dir = os.path.join(working_dir, 'weights', 'trial_%i' % trial)
                if not os.path.isdir(trial_dir):
                    os.makedirs(trial_dir)
                if resume_prev_run and os.path.isfile(os.path.join(trial_dir, 'bestmodel.h5')):
                    continue
                reward, loss_and_metrics = manager.get_rewards(trial, model_states)
                train_hist_csv_writter(writer, trial, loss_and_metrics, reward, model_states)
                fh.flush()
    return
|
def get_mock_reward(model_states, train_history_df, metric, stringify_states=True):
    """Draw the metrics recorded for `model_states` from a history dataframe.

    If the architecture matches one or more rows (by the 'L1'..'Ln' columns),
    a uniformly random matching row's `metric` columns are returned. If it is
    absent, the row with the WORST (highest) loss is returned as a penalty.
    """
    if stringify_states:
        model_states_ = [str(x) for x in model_states]
    else:
        model_states_ = model_states
    idx_bool = np.array([(train_history_df['L%i' % (i + 1)] == model_states_[i]) for i in range(len(model_states_))])
    index = np.apply_along_axis(func1d=lambda x: all(x), axis=0, arr=idx_bool)
    if np.sum(index) == 0:
        # BUG FIX: `idxmax()` returns an index *label*, which is only valid
        # with `.iloc` on a default RangeIndex; a positional argmax works for
        # any index.
        pos = int(np.argmax(train_history_df['loss'].to_numpy()))
        return train_history_df[metric].iloc[pos]
    else:
        return train_history_df[metric].iloc[np.random.choice(np.where(index)[0])]
|
def get_default_mock_reward_fn(model_states, train_history_df, lbd=1.0, metric=['loss', 'knowledge', 'acc']):
    """Default mock reward: -(loss + lbd * knowledge) sampled from history.

    Returns (reward, [loss, acc], {'knowledge': ...}).
    """
    mock = get_mock_reward(model_states, train_history_df, metric)
    reward = -(mock['loss'] + lbd * mock['knowledge'])
    return reward, [mock['loss'], mock['acc']], {'knowledge': mock['knowledge']}
|
def get_mock_reward_fn(train_history_df, metric, stringify_states, lbd=1.0):
    """Build a reward closure over a fixed history dataframe.

    The returned callable mirrors the manager reward-fn signature and yields
    (reward, [loss, *other-metrics], {'knowledge': ...}).
    """
    def reward_fn(model_states, *args, **kwargs):
        mock = get_mock_reward(model_states, train_history_df, metric, stringify_states)
        reward = -(mock['loss'] + lbd * mock['knowledge'])
        extras = [mock[m] for m in metric if m not in ('loss', 'knowledge')]
        return reward, [mock['loss']] + extras, {'knowledge': mock['knowledge']}
    return reward_fn
|
class MockManager(GeneralManager):
    """Manager that replays rewards bootstrapped from historical run records
    instead of actually training child models."""

    def __init__(self, history_fn_list, model_compile_dict, train_data=None, validation_data=None,
                 input_state=None, output_state=None, model_fn=None, reward_fn=None,
                 post_processing_fn=None, working_dir='.', Lambda=1.0, acc_beta=0.8,
                 clip_rewards=0.0, metric_name_dict={'acc': 0, 'knowledge': 1, 'loss': 2}, verbose=0):
        # guard against accidentally passing the Keras `Lambda` layer class
        assert type(Lambda) in (float, int), 'Lambda potentially confused with `Keras.core.Lambda` layer'
        self._lambda = Lambda
        self.reward_fn = reward_fn
        self.model_fn = model_fn
        self.model_compile_dict = model_compile_dict
        self.train_history_df = read_history(history_fn_list, metric_name_dict)
        self.clip_rewards = clip_rewards
        self.verbose = verbose
        self.working_dir = working_dir
        if not os.path.exists(self.working_dir):
            os.makedirs(self.working_dir)
        self.beta = acc_beta
        self.beta_bias = acc_beta
        self.moving_reward = 0.0

    def __str__(self):
        return 'MockManager with %i records' % self.train_history_df.shape[0]

    def get_rewards(self, trial, model_states=None, **kwargs):
        """Return (reward, metrics-dict) for `model_states`, sampled from the
        loaded history via `reward_fn` (or the default mock reward)."""
        if model_states is None:
            model_states = kwargs.pop('model_arc', None)
        fn = self.reward_fn if self.reward_fn else get_default_mock_reward_fn
        this_reward, loss_and_metrics, reward_metrics = fn(model_states, self.train_history_df, self._lambda)
        loss = loss_and_metrics.pop(0)
        metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i]
                   for i in range(len(loss_and_metrics))}
        metrics['loss'] = loss
        if reward_metrics:
            metrics.update(reward_metrics)
        return this_reward, metrics
|
def get_state_space():
    """Define the per-layer candidate operations for a 4-layer 1D-conv search
    space: conv/denovo filters, pooling, flatten/pool-reduce, and dense head.

    Returns
    -------
    ModelSpace with layers 0-3 populated. Adding `Identity` to a layer lets
    the search effectively omit that layer.
    """
    state_space = ModelSpace()
    kernel_sizes = (8, 14, 20)
    layer0 = [State('conv1d', filters=3, kernel_size=k, kernel_initializer='glorot_uniform',
                    activation='relu', name='conv1') for k in kernel_sizes]
    layer0 += [State('denovo', filters=3, kernel_size=k, lambda_pos=0.0001, lambda_l1=0.0001,
                     lambda_filter=1e-08, name='conv1') for k in kernel_sizes]
    state_space.add_layer(0, layer0)
    state_space.add_layer(1, [State('Identity'),
                              State('maxpool1d', pool_size=8, strides=8),
                              State('avgpool1d', pool_size=8, strides=8)])
    state_space.add_layer(2, [State('Flatten'),
                              State('GlobalMaxPool1D'),
                              State('GlobalAvgPool1D'),
                              State('SFC', output_dim=10, symmetric=True, smoothness_penalty=1.0,
                                    smoothness_l1=True, smoothness_second_diff=True,
                                    curvature_constraint=10.0, name='sfc')])
    state_space.add_layer(3, [State('Dense', units=3, activation='relu'),
                              State('Dense', units=10, activation='relu'),
                              State('Identity')])
    return state_space
|
def get_data():
    """Read the bundled positive/negative simulated FASTA files and return
    ((X_train, y_train), (X_test, y_test)) split 18000/2000."""
    pos_file = resource_filename('amber.resources', 'simdata/DensityEmbedding_motifs-MYC_known1_min-1_max-1_mean-1_zeroProb-0p0_seqLength-200_numSeqs-10000.fa.gz')
    neg_file = resource_filename('amber.resources', 'simdata/EmptyBackground_seqLength-200_numSeqs-10000.fa.gz')
    X, y = data_parser.get_data_from_fasta_sequence(pos_file, neg_file)
    split = 18000
    return (X[:split], y[:split]), (X[split:], y[split:])
|
class DataToParse():
    """Lazy pointer to a data file plus the method used to deserialize it.

    The parse method can be supplied explicitly, or is inferred from the file
    extension (.pkl/.pickle -> pickle, .npy -> numpy, .h5/.hdf5 -> hdf5).
    """

    def __init__(self, path, method=None):
        self.path = path
        self.method = method
        self._extension()

    def _extension(self):
        """Infer `self.method` from the file extension if not explicitly set.

        Raises on an unknown extension only when no method was supplied.
        """
        # BUG FIX: an explicitly supplied `method` used to be overwritten by
        # the inference (and an unknown extension raised even when the caller
        # had already told us how to parse the file).
        if self.method is not None:
            return
        ext = os.path.splitext(self.path)[1]
        if ext in ('.pkl', '.pickle'):
            self.method = 'pickle'
        elif ext in ('.npy',):
            self.method = 'numpy'
        elif ext in ('.h5', '.hdf5'):
            self.method = 'hdf5'
        else:
            raise Exception('Unknown data format: %s' % self.path)

    def __str__(self):
        return 'DataToParse-%s' % self.path

    def unpack(self):
        """Load and return the file contents using the configured method."""
        assert os.path.isfile(self.path), 'File does not exist: %s' % self.path
        assert self.method is not None, 'Cannot determine parse method for file: %s' % self.path
        print('unpacking data.. %s' % self.path)
        if self.method == 'pickle':
            import pickle
            return pickle.load(open(self.path, 'rb'))
        elif self.method == 'numpy':
            import numpy
            return numpy.load(self.path)
        elif self.method == 'hdf5':
            import h5py
            return h5py.File(self.path, 'r')
|
def load_data_dict(d):
    """Unpack any DataToParse values (or file-path strings) in `d` in place
    and return the mutated dict."""
    for key, value in d.items():
        if type(value) is DataToParse:
            d[key] = value.unpack()
        elif type(value) is str:
            assert os.path.isfile(value), 'cannot find file: %s' % value
            d[key] = DataToParse(value).unpack()
    return d
|
def get_train_env(env_type, controller, manager, *args, **kwargs):
    """Factory for controller training environments.

    Supported `env_type`: 'ControllerTrainEnv', 'EnasTrainEnv'.
    Raises on any other value.
    """
    if env_type == 'ControllerTrainEnv':
        from .architect.trainEnv import ControllerTrainEnvironment
        env = ControllerTrainEnvironment(*args, controller=controller, manager=manager, **kwargs)
    elif env_type == 'EnasTrainEnv':
        from .architect.trainEnv import EnasTrainEnv
        env = EnasTrainEnv(*args, controller=controller, manager=manager, **kwargs)
    else:
        # BUG FIX: the message previously said "manager type" (copy-paste).
        raise Exception('cannot understand env type: %s' % env_type)
    print('env_type = %s' % env_type)
    return env
|
def get_controller(controller_type, model_space, session, **kwargs):
    """Factory for controller objects, dispatched on `controller_type`
    (short or full class name). Imports are deferred to the chosen branch."""
    if controller_type in ('General', 'GeneralController'):
        from .architect import GeneralController
        controller = GeneralController(model_space=model_space, session=session, **kwargs)
    elif controller_type in ('Operation', 'OperationController'):
        from .architect import OperationController
        controller = OperationController(model_space=model_space, **kwargs)
    elif controller_type in ('MultiIO', 'MultiIOController'):
        from .architect import MultiIOController
        controller = MultiIOController(model_space=model_space, session=session, **kwargs)
    elif controller_type in ('ZeroShot', 'ZeroShotController'):
        from .architect import ZeroShotController
        controller = ZeroShotController(model_space=model_space, session=session, **kwargs)
    else:
        raise Exception('cannot understand controller type: %s' % controller_type)
    print('controller = %s' % controller_type)
    return controller
|
def get_model_space(arg):
    """Resolve `arg` to a ModelSpace.

    Accepts a preset name ('Default ANN', 'Default 1D-CNN'), a dict/list
    specification, or an existing ModelSpace instance.
    """
    from .architect.modelSpace import ModelSpace
    if type(arg) is str:
        if arg == 'Default ANN':
            from .bootstrap.dense_skipcon_space import get_model_space as ms_ann
            return ms_ann(3)
        if arg == 'Default 1D-CNN':
            from .bootstrap.simple_conv1d_space import get_state_space as ms_cnn
            return ms_cnn()
        raise Exception('cannot understand string model_space arg: %s' % arg)
    if type(arg) in (dict, list):
        return ModelSpace.from_dict(arg)
    if isinstance(arg, ModelSpace):
        return arg
    raise Exception('cannot understand non-string model_space arg: %s' % arg)
|
def get_manager(manager_type, model_fn, reward_fn, data_dict, session, *args, **kwargs):
    """Factory for manager objects; `data_dict` entries are unpacked first
    via `load_data_dict` and must provide 'train_data'/'validation_data'
    (except for the Mock manager)."""
    data_dict = load_data_dict(data_dict)
    if manager_type in ('General', 'GeneralManager'):
        from .architect.manager import GeneralManager
        manager = GeneralManager(*args, model_fn=model_fn, reward_fn=reward_fn,
                                 train_data=data_dict['train_data'],
                                 validation_data=data_dict['validation_data'], **kwargs)
    elif manager_type in ('EnasManager', 'Enas'):
        from .architect.manager import EnasManager
        manager = EnasManager(*args, model_fn=model_fn, reward_fn=reward_fn,
                              train_data=data_dict['train_data'],
                              validation_data=data_dict['validation_data'],
                              session=session, **kwargs)
    elif manager_type in ('Mock', 'MockManager'):
        from .bootstrap.mock_manager import MockManager
        manager = MockManager(*args, model_fn=model_fn, reward_fn=reward_fn, **kwargs)
    elif manager_type in ('Distributed', 'DistributedManager'):
        from .architect.manager import DistributedGeneralManager
        train_data_kwargs = kwargs.pop('train_data_kwargs', None)
        validate_data_kwargs = kwargs.pop('validate_data_kwargs', None)
        devices = kwargs.pop('devices', None)
        manager = DistributedGeneralManager(*args, devices=devices,
                                            train_data_kwargs=train_data_kwargs,
                                            validate_data_kwargs=validate_data_kwargs,
                                            model_fn=model_fn, reward_fn=reward_fn,
                                            train_data=data_dict['train_data'],
                                            validation_data=data_dict['validation_data'], **kwargs)
    else:
        raise Exception('cannot understand manager type: %s' % manager_type)
    print('manager = %s' % manager_type)
    return manager
|
def get_modeler(model_fn_type, model_space, session, *args, **kwargs):
    """Factory for child-model builders.

    Pops `inputs_op`/`outputs_op` (lists of State instances or State-kwargs
    dicts) from kwargs, normalizes them to State instances, and dispatches on
    `model_fn_type`. Raises on unknown types.
    """
    from .architect.modelSpace import State

    def _as_states(op_list):
        # accept either ready-made State objects or dicts of State kwargs
        return [x if isinstance(x, State) else State(**x) for x in op_list]

    if model_fn_type in ('DAG', 'DAGModelBuilder'):
        from .modeler import DAGModelBuilder
        assert ('inputs_op' in kwargs) and ('outputs_op' in kwargs)
        inputs_op = _as_states(kwargs.pop('inputs_op'))
        output_op = _as_states(kwargs.pop('outputs_op'))
        model_fn = DAGModelBuilder(*args, model_space=model_space, num_layers=len(model_space),
                                   inputs_op=inputs_op, output_op=output_op, session=session, **kwargs)
    elif model_fn_type in ('Enas', 'EnasAnnModelBuilder'):
        from .modeler import EnasAnnModelBuilder
        inputs_op = _as_states(kwargs.pop('inputs_op'))
        output_op = _as_states(kwargs.pop('outputs_op'))
        model_fn = EnasAnnModelBuilder(*args, model_space=model_space, num_layers=len(model_space),
                                       inputs_op=inputs_op, output_op=output_op, session=session, **kwargs)
    elif model_fn_type == 'EnasCnnModelBuilder':
        from .modeler import EnasCnnModelBuilder
        inputs_op = _as_states(kwargs.pop('inputs_op'))
        output_op = _as_states(kwargs.pop('outputs_op'))
        controller = kwargs.pop('controller')
        model_fn = EnasCnnModelBuilder(*args, model_space=model_space, num_layers=len(model_space),
                                       inputs_op=inputs_op, output_op=output_op, session=session,
                                       controller=controller, **kwargs)
    elif model_fn_type == 'KerasModelBuilder':
        from .modeler import KerasModelBuilder
        inputs_op = _as_states(kwargs.pop('inputs_op'))
        assert len(inputs_op) == 1, 'KerasModelBuilder only accepts one input; try KerasMultiIOModelBuilder for multiple inputs'
        output_op = _as_states(kwargs.pop('outputs_op'))
        assert len(output_op) == 1, 'KerasModelBuilder only accepts one output; try KerasMultiIOModelBuilder for multiple outputs'
        model_fn = KerasModelBuilder(*args, inputs=inputs_op[0], outputs=output_op[0],
                                     model_space=model_space, **kwargs)
    elif model_fn_type == 'KerasMultiIOModelBuilder':
        from .modeler import KerasMultiIOModelBuilder
        inputs_op = _as_states(kwargs.pop('inputs_op'))
        output_op = _as_states(kwargs.pop('outputs_op'))
        model_fn = KerasMultiIOModelBuilder(*args, model_space=model_space, inputs_op=inputs_op,
                                            output_op=output_op, session=session, **kwargs)
    elif model_fn_type == 'KerasBranchModelBuilder':
        from .modeler import KerasBranchModelBuilder
        inputs_op = _as_states(kwargs.pop('inputs_op'))
        output_op = _as_states(kwargs.pop('outputs_op'))
        assert len(output_op) == 1
        model_fn = KerasBranchModelBuilder(*args, model_space=model_space, inputs_op=inputs_op,
                                           output_op=output_op[0], **kwargs)
    else:
        raise Exception('cannot understand model_builder type: %s' % model_fn_type)
    print('modeler = %s' % model_fn_type)
    return model_fn
|
def get_reward_fn(reward_fn_type, knowledge_fn, *args, **kwargs):
    """Factory for reward functions, dispatched on `reward_fn_type`.

    `knowledge_fn` is forwarded where applicable and must be None for
    LossReward.
    """
    if reward_fn_type == 'KnowledgeReward':
        from .architect.reward import KnowledgeReward
        reward_fn = KnowledgeReward(knowledge_fn, *args, **kwargs)
    elif reward_fn_type == 'LossReward':
        from .architect.reward import LossReward
        assert knowledge_fn is None, ('Incompatability: LossReward must have knownledge_fn=None; got %s' % knowledge_fn)
        reward_fn = LossReward(*args, **kwargs)
    elif reward_fn_type == 'Mock_Reward':
        from .architect.reward import MockReward
        reward_fn = MockReward(*args, **kwargs)
    elif reward_fn_type == 'LossAucReward':
        from .architect.reward import LossAucReward
        reward_fn = LossAucReward(*args, knowledge_function=knowledge_fn, **kwargs)
    else:
        raise Exception('cannot understand reward_fn type: %s' % reward_fn_type)
    print('reward = %s' % reward_fn_type)
    return reward_fn
|
def get_knowledge_fn(knowledge_fn_type, knowledge_data_dict, *args, **kwargs):
    """Factory for knowledge functions.

    'None'/'zero' yields None (no knowledge term). Otherwise the constructed
    object's `knowledge_encoder`, when present, is fed the unpacked
    `knowledge_data_dict`.
    """
    if knowledge_data_dict is not None:
        knowledge_data_dict = load_data_dict(knowledge_data_dict)
    if knowledge_fn_type in ('ght', 'GraphHierarchyTree'):
        from .objective import GraphHierarchyTree
        k_fn = GraphHierarchyTree(*args, **kwargs)
    elif knowledge_fn_type in ('ghtal', 'GraphHierarchyTreeAuxLoss'):
        from .objective import GraphHierarchyTreeAuxLoss
        k_fn = GraphHierarchyTreeAuxLoss(*args, **kwargs)
    elif knowledge_fn_type == 'Motif':
        from .objective import MotifKLDivergence
        k_fn = MotifKLDivergence(*args, **kwargs)
    elif knowledge_fn_type == 'AuxilaryAcc':
        from .objective import AuxilaryAcc
        k_fn = AuxilaryAcc(*args, **kwargs)
    elif knowledge_fn_type in ('None', 'zero'):
        k_fn = None
    else:
        raise Exception('cannot understand knowledge_fn type: %s' % knowledge_fn_type)
    if k_fn is not None and hasattr(k_fn, 'knowledge_encoder'):
        k_fn.knowledge_encoder(**knowledge_data_dict)
    print('knowledge = %s' % knowledge_fn_type)
    return k_fn
|
def get_model_and_io_nodes(model_space_arg):
    """Load (model_space, input_states, output_state) from a JSON file.

    Any 'shape' entries given as strings (e.g. "(4,)") are literal_eval'd
    into tuples in place.
    """
    import json
    import ast

    def _parse_shapes(specs):
        for spec in specs:
            if 'shape' in spec and type(spec['shape']) is str:
                spec['shape'] = ast.literal_eval(spec['shape'])
        return specs

    if not os.path.isfile(model_space_arg):
        raise Exception('cannot open file: %s' % model_space_arg)
    with open(model_space_arg, 'r') as f:
        d = json.load(f)
    model_space = d['model_space']
    d['input_states'] = _parse_shapes(d['input_states'])
    input_states = d['input_states']
    d['output_state'] = _parse_shapes([d['output_state']])[0]
    output_state = d['output_state']
    return model_space, input_states, output_state
|
def gui_mapper(var_dict):
    """Translate the raw GUI string variables into the (type_dict, specs)
    pair consumed by the factory functions."""
    wd = var_dict['wd']
    train_data = DataToParse(var_dict['train_data'])
    val_data = DataToParse(var_dict['validation_data'])
    model_space, input_states, output_state = get_model_and_io_nodes(var_dict['model_space'])
    metrics = [m.strip() for m in var_dict['child_metrics'].strip('[]').split(',') if len(m.strip())]
    model_compile_dict = {'optimizer': var_dict['optimizer'], 'loss': var_dict['child_loss'], 'metrics': metrics}
    # SECURITY NOTE: eval() on a GUI-supplied string; assumes trusted local input.
    knowledge_params = eval(var_dict['knowledge_specific_settings'])
    assert type(knowledge_params) is dict, ('Error in parsing `knowledge_specific settings`, must be a dict:\n %s' % knowledge_params)
    knowledge_data = DataToParse(var_dict['knowledge_data']).unpack()
    type_dict = {
        'controller_type': var_dict['controller_type'],
        'model_fn_type': var_dict['model_builder'],
        'knowledge_fn_type': var_dict['knowledge_fn'],
        'reward_fn_type': var_dict['reward_fn'],
        'manager_type': var_dict['manager_type'],
        'env_type': var_dict['env_type'],
    }

    def _opt_float(key):
        # the GUI uses the literal string 'None' to mean "unset"
        return None if var_dict[key] == 'None' else float(var_dict[key])

    controller_spec = {
        'use_ppo_loss': var_dict['optim_method'] == 'PPO',
        'num_input_blocks': len(input_states),
        'lstm_size': int(var_dict['lstm_size']),
        'lstm_num_layers': int(var_dict['lstm_layers']),
        'kl_threshold': float(var_dict['kl_cutoff']),
        'train_pi_iter': int(var_dict['ctrl_epoch']),
        'lr_init': float(var_dict['ctrl_lr']),
        'buffer_size': int(var_dict['ctrl_buffer_size']),
        'batch_size': int(var_dict['ctrl_batch_size']),
    }
    model_builder_spec = {
        'input_states': input_states,
        'output_state': output_state,
        'model_compile_dict': model_compile_dict,
        'dag_func': var_dict['dag_func'],
    }
    reward_spec = {
        'Lambda': float(var_dict['knowledge_weight']),
        'knowledge_c': _opt_float('knowledge_c'),
        'loss_c': _opt_float('loss_c'),
    }
    manager_spec = {
        'params': {
            'working_dir': wd,
            'model_compile_dict': model_compile_dict,
            'post_processing_fn': var_dict['postprocessing_fn'],
            'epochs': int(var_dict['child_epochs']),
            'verbose': int(var_dict['manager_verbosity']),
            'child_batchsize': int(var_dict['child_batch_size']),
        },
        'data': {'train_data': train_data, 'validation_data': val_data},
    }
    train_env_spec = {
        'max_episode': int(var_dict['total_steps']),
        'max_step_per_ep': int(var_dict['samples_per_step']),
        'should_plot': True,
        'working_dir': wd,
        'squeezed_action': True,
    }
    specs = {
        'controller': controller_spec,
        'model_space': model_space,
        'model_builder': model_builder_spec,
        'knowledge_fn': {'params': knowledge_params, 'data': knowledge_data},
        'reward_fn': reward_spec,
        'manager': manager_spec,
        'train_env': train_env_spec,
    }
    return type_dict, specs
|
def load_images(wd, frame):
    """Scan `wd` for PNG figures, rebuild the module-level gallery state
    (`images_`, `captions_`, `images_type_indices`), and show the first one."""
    global images_, captions_
    png_files = sorted(f for f in os.listdir(wd) if f.endswith('png'))
    images_ = [os.path.join(wd, f) for f in png_files]
    captions_ = [f.split('.')[0] for f in png_files]
    if not png_files:
        messagebox.showinfo('Warning', 'Load failed. No figures found.')
        return
    for gtype in GALLERY_SHOW_TYPES:
        # bucket figure indices by caption prefix for the show-type filter
        images_type_indices[gtype] = [
            i for i in range(len(captions_))
            if captions_[i].startswith(GALLERY_SHOWTYPE_STARTSWTIH[gtype])
        ]
    move(0, frame)
|
def move(delta, frame):
    """Advance the gallery by `delta` figures (with wraparound) and render
    the current figure into `frame.demo_label`."""
    global current, images_, images_type_indices
    gallery_type = frame.gallery_showtype.get()
    indices = images_type_indices[gallery_type]
    if not (0 <= current + delta < len(indices)):
        if len(indices) == 0:
            # nothing under this filter: fall back to the default show type
            frame.gallery_showtype.set(GALLERY_SHOW_TYPES[0])
        else:
            # wrap around: past the end restarts at 0, before 0 jumps to the end
            current = -1 if current + delta >= len(indices) else len(indices)
    current += delta
    image = Image.open(images_[indices[current]]).resize(GALLERY_SIZE, Image.ANTIALIAS)
    photo = ImageTk.PhotoImage(image)
    page_count = 'Figure %i / %i :' % (current + 1, len(indices))
    frame.demo_label['text'] = page_count + ' ' + captions_[indices[current]]
    frame.demo_label['image'] = photo
    # keep a reference so tkinter does not garbage-collect the image
    frame.demo_label.photo = photo
|
class EvaluateTab(tk.Frame):
def __init__(self, parent, controller, global_wd, global_thread_spawner=None, prev_=None, next_=None):
tk.Frame.__init__(self, master=parent, bg=BODY_COLOR)
self.global_wd = global_wd
self.global_thread_spawner = global_thread_spawner
self._create_header(controller, prev_, next_)
self._create_left_column()
self._create_gallery_column()
self.animation_register = []
self.has_run = False
def _create_header(self, controller, prev_, next_):
self.header = tk.Frame(master=self, width=800, height=20, bg=BODY_COLOR)
self.header.grid(row=0, columnspan=5, sticky='nw')
button2 = tk.Button(self.header, text='Discover ->', command=(lambda : controller.show_frame(next_)), bg=BTN_BG)
button2.grid(row=0, column=2, sticky='w')
button1 = tk.Button(self.header, text='<- Train', command=(lambda : controller.show_frame(prev_)), bg=BTN_BG)
button1.grid(row=0, column=1, sticky='w')
def _create_left_column(self):
self.left_column = tk.Frame(master=self, width=300, height=600, bg=SIDEBAR_COLOR)
self.left_column.grid(row=1, column=0, columnspan=2, sticky='nw')
self.left_column.rowconfigure(0, weight=1)
label = tk.Label(self.left_column, text='Gallery', font=LARGE_FONT, bg=BODY_COLOR, justify=tk.CENTER)
label.grid(row=0, columnspan=2, sticky='ew')
button1 = tk.Button(self.left_column, text='Load', command=(lambda : load_images(os.path.join(self.global_wd.get()), self)), bg=BTN_BG)
button1.grid(row=6, column=0, columnspan=2, sticky='we')
button2 = tk.Button(self.left_column, text='<--', command=(lambda : move((- 1), self)), bg=BTN_BG)
button2.grid(row=7, column=0, columnspan=1, sticky='we')
button3 = tk.Button(self.left_column, text='-->', command=(lambda : move((+ 1), self)), bg=BTN_BG)
button3.grid(row=7, column=1, columnspan=1, sticky='we')
sep = ttk.Separator(master=self.left_column, orient=tk.HORIZONTAL)
sep.grid(row=8, columnspan=2, sticky='ew', pady=10)
self.gallery_showtype = tk.StringVar(self.left_column)
self.gallery_showtype.set(GALLERY_SHOW_TYPES[0])
popupMenu = tk.OptionMenu(self.left_column, self.gallery_showtype, *GALLERY_SHOW_TYPES, command=self._popup_menu_move)
popupMenu.config(bg=BODY_COLOR)
tk.Label(self.left_column, text='Gallery show type:', bg=BODY_COLOR).grid(row=9, columnspan=2, sticky='w')
popupMenu.grid(row=10, columnspan=2)
def _popup_menu_move(self, *args):
    """Reset the module-level gallery cursor and redraw the gallery when a
    new show type is picked from the dropdown.

    ``*args`` absorbs the selection value passed by tk.OptionMenu.
    """
    # `current` is the module-level gallery index shared with move().
    global current
    current = 0
    move(0, self)
def _create_gallery_column(self):
    """Create the right-hand display area holding the gallery image label."""
    gallery_frame = tk.Frame(master=self, width=500, height=600, bg=BODY_COLOR)
    gallery_frame.grid(row=1, column=2, columnspan=3, sticky='nswe')
    self.right_column = gallery_frame
    # The image is shown above its caption (compound=TOP); move() updates it.
    img_label = tk.Label(self.right_column, compound=tk.TOP, font=LARGE_FONT, bg=BODY_COLOR)
    img_label.grid(row=0, sticky='nsew')
    self.demo_label = img_label
|
def parse_layout(master):
    """Instantiate the widgets declared in the module-level PARAMS_LAYOUT.

    One tk.Frame is built per top-level key (parameter group); inside it each
    entry's spec ``v['value']`` is turned into a widget via create_widget and
    gridded at ``v['wpos']`` with a label at ``v['lpos']``.

    Returns:
        (frames, var_dict): the list of group frames, and a dict mapping each
        parameter name to its ``(value_var, display_var)`` pair. Decorative
        separators (where create_widget returns ``None``) are not registered.
    """
    frames = []
    var_dict = {}
    for tab_name in PARAMS_LAYOUT:
        f = tk.Frame(master=master, bg=BODY_COLOR)
        f.grid_columnconfigure([0, 1, 2, 3], minsize=100)
        for (k, v) in PARAMS_LAYOUT[tab_name].items():
            try:
                (str_var, widget, btn_var) = create_widget(v['value'], f)
                # Only tk.Text widgets take their default by insertion here;
                # other widget kinds get defaults elsewhere (see TrainTab).
                if ((type(widget) is tk.Text) and ('default' in v)):
                    widget.insert('end', v['default'])
                widget.grid(**v['wpos'])
            except Exception as e:
                # Re-raise with the offending layout entry for debuggability.
                raise Exception(('%s\n caused by (%s, %s)' % (e, k, v)))
            if (str_var is not None):
                var_dict[k] = (str_var, btn_var)
            label = tk.Label(f, text=(k + ': '), bg=BODY_COLOR)
            label.grid(**v['lpos'])
        frames.append(f)
    return (frames, var_dict)
|
def pretty_print_dict(d):
    """Print a dict to stdout, one ``key  =  value`` line per entry,
    preceded by an 80-character horizontal rule."""
    print('-' * 80)
    for key, value in d.items():
        print(key, ' = ', value)
|
class InitializeTab(tk.Frame):
    """Configuration tab: renders the parameter groups from PARAMS_LAYOUT and
    supports saving/loading the values as ``param_config.json`` in the
    working directory. ``var_dict`` maps parameter name -> (value_var,
    display_var); the 'wd' entry is the shared working-directory StringVar.
    """
    def __init__(self, parent, controller, global_wd, global_thread_spawner=None, prev_=None, next_=None):
        tk.Frame.__init__(self, parent, bg=BODY_COLOR)
        self.global_wd = global_wd
        # Seed the registry with the shared working directory (display slot unused).
        self.var_dict = {'wd': (global_wd, 0)}
        self.animation_register = []
        # Index of the parameter-group frame currently raised.
        self.param_tab_cont = 0
        self._create_header(controller, prev_, next_)
        self._create_left_column()
        self._create_right_column()
    def _create_header(self, controller, prev_, next_):
        """Navigation bar linking to the neighbouring tabs."""
        self.header = tk.Frame(master=self, width=800, height=20, bg=BODY_COLOR)
        self.header.grid(row=0, columnspan=5, sticky='nw')
        button2 = tk.Button(self.header, text='Train ->', command=(lambda : controller.show_frame(next_)), bg=BTN_BG)
        button2.grid(row=0, column=2, sticky='w')
        button1 = tk.Button(self.header, text='<- Home', command=(lambda : controller.show_frame(prev_)), bg=BTN_BG)
        button1.grid(row=0, column=1, sticky='w')
    def _create_left_column(self):
        """Sidebar with Load/Save buttons, group navigation arrows and a
        dropdown jumping directly to a parameter group."""
        self.left_column = tk.Frame(master=self, width=300, height=600, bg=SIDEBAR_COLOR)
        self.left_column.grid(row=1, column=0, columnspan=2, sticky='nw')
        self.left_column.rowconfigure(0, weight=1)
        label = tk.Label(self.left_column, text='Configuration', font=LARGE_FONT, bg=BODY_COLOR, justify=tk.CENTER)
        label.grid(row=0, columnspan=2, sticky='ew')
        button0 = tk.Button(self.left_column, text='Load', command=self.load, bg=BTN_BG)
        button0.grid(row=2, column=0, columnspan=1, sticky='we')
        button1 = tk.Button(self.left_column, text='Save', command=self.save, bg=BTN_BG)
        button1.grid(row=2, column=1, columnspan=1, sticky='we')
        button2 = tk.Button(self.left_column, text='<--', command=(lambda : self.move((- 1))), bg=BTN_BG)
        button2.grid(row=3, column=0, columnspan=1, sticky='we')
        button3 = tk.Button(self.left_column, text='-->', command=(lambda : self.move((+ 1))), bg=BTN_BG)
        button3.grid(row=3, column=1, columnspan=1, sticky='we')
        sep = ttk.Separator(master=self.left_column, orient=tk.HORIZONTAL)
        sep.grid(row=6, columnspan=2, sticky='ew', pady=10)
        self.params_showtype = tk.StringVar(self.left_column)
        self.params_showtype.set(PARAMS_SHOW_TYPES[0])
        # Jump to the selected group by moving the delta from the current one.
        showtype_to_index = {PARAMS_SHOW_TYPES[i]: i for i in range(len(PARAMS_SHOW_TYPES))}
        popupMenu = tk.OptionMenu(self.left_column, self.params_showtype, *PARAMS_SHOW_TYPES, command=(lambda x: self.move(delta=(showtype_to_index[self.params_showtype.get()] - self.param_tab_cont))))
        popupMenu.config(bg=BODY_COLOR)
        tk.Label(self.left_column, text='Parameters:', bg=BODY_COLOR).grid(row=7, columnspan=2, sticky='w')
        popupMenu.grid(row=8, columnspan=2)
    def _create_right_column(self):
        """Build all parameter-group frames (stacked at the same grid cell)
        and raise the first one."""
        self.right_column = tk.Frame(master=self, width=500, height=600, bg=BODY_COLOR)
        self.right_column.grid(row=1, column=2, columnspan=3, sticky='nw')
        (self.frames, var_dict) = parse_layout(self.right_column)
        self.var_dict.update(var_dict)
        # All frames occupy cell (0, 0); tkraise switches between them.
        _ = list(map((lambda x: x.grid(row=0, column=0, sticky='nesw')), self.frames))
        self.frames[0].tkraise()
    def parse_vars(self, verbose=0):
        """Read every registered widget variable into a plain dict of strings.

        Raises:
            Exception: if a registered variable is neither a StringVar nor a
                Text widget.
        """
        var_dict = {}
        for (k, v) in self.var_dict.items():
            if (type(v[0]) is tk.StringVar):
                v_ = v[0].get()
            elif (type(v[0]) is tk.Text):
                # 'end-1c' strips the trailing newline Text always appends.
                v_ = v[0].get(1.0, 'end-1c')
            else:
                raise Exception(('Error in parse_vars: %s, %s' % (k, v)))
            var_dict[k] = v_
        if verbose:
            pretty_print_dict(var_dict)
        return var_dict
    def move(self, delta):
        """Raise the parameter-group frame ``delta`` steps away, wrapping
        around at both ends."""
        self.param_tab_cont += delta
        if (self.param_tab_cont >= len(self.frames)):
            self.param_tab_cont = 0
        if (self.param_tab_cont < 0):
            self.param_tab_cont = (len(self.frames) - 1)
        self.frames[self.param_tab_cont].tkraise()
    def preset(self):
        """Debug helper: print the mapped types/specs for the current values."""
        (types, specs) = gui_mapper(self.parse_vars(verbose=0))
        pretty_print_dict(types)
        pretty_print_dict(specs)
    def save(self):
        """Write the current values to ``<wd>/param_config.json``, asking
        before overwriting an existing file."""
        var_dict = self.parse_vars(verbose=0)
        param_fp = os.path.join(self.global_wd.get(), 'param_config.json')
        if os.path.isfile(param_fp):
            is_overwrite = tk.messagebox.askquestion('File exists', 'Are you sure you want to overwrite the parameter file?', icon='warning')
            if (is_overwrite != 'yes'):
                return
        with open(param_fp, 'w', newline='\n') as f:
            json.dump(var_dict, f, indent=4)
    def load(self, fp=None):
        """Load values from a JSON config and push them into the widgets.

        With no ``fp``, a file dialog is shown; if that is cancelled the
        default ``<wd>/param_config.json`` is tried. Silently returns when
        the file does not exist. The 'wd' entry is never overwritten.
        """
        if (fp is None):
            fp = filedialog.askopenfilename(initialdir=self.global_wd.get())
            if (not len(fp)):
                fp = os.path.join(self.global_wd.get(), 'param_config.json')
        print(fp)
        if (not os.path.isfile(fp)):
            return
        with open(fp, 'r') as f:
            var_dict = json.load(f)
        for (k, v) in var_dict.items():
            if (k == 'wd'):
                continue
            if (type(self.var_dict[k][0]) is tk.StringVar):
                self.var_dict[k][0].set(v)
                # Display slot shows a truncated basename of the chosen value.
                self.var_dict[k][(- 1)].set(os.path.basename(v)[:15])
            elif (type(self.var_dict[k][0]) is tk.Text):
                self.var_dict[k][0].delete(1.0, 'end')
                self.var_dict[k][0].insert('end', v)
        return
|
class AmberApp(tk.Tk):
    """Top-level application window.

    Lifecycle: construct, call ``_connect_global_wd`` with a shared
    ``tk.StringVar``, then ``_enter`` to open the welcome dialog (the main
    window stays withdrawn until the user confirms a working directory),
    and finally ``_create_tabs`` to build the tab controller.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.geometry('800x600+500+100')
        self.resizable(0, 0)
        self.style = ttk.Style()
        self.grid_columnconfigure(0, weight=1)
        self.grid_rowconfigure(0, weight=1)
        self.global_wd = None  # shared tk.StringVar; set via _connect_global_wd
        # Callable that spawns the training subprocess; assign before calling
        # _create_tabs so it can be forwarded to TabController.
        self.global_thread_spawner = None
        self.title('Amber - Main')
    def _connect_global_wd(self, wd):
        """Attach the shared working-directory StringVar."""
        self.global_wd = wd
    def _enter(self):
        """Open the welcome dialog and hide the main window until a working
        directory has been confirmed by the user."""
        assert (self.global_wd is not None), 'must connect to a global var `wd` before enter'
        self.welcome_window = welcome_page(master=self, global_wd=self.global_wd)
        self.withdraw()
    def _create_tabs(self):
        """Build the TabController and aggregate the tabs' animation hooks.

        Bug fix: ``TabController.__init__`` requires ``global_wd`` and
        ``global_thread_spawner``; the previous call passed neither and
        always raised ``TypeError``.
        """
        self.tc = TabController(master=self, global_wd=self.global_wd,
                                global_thread_spawner=self.global_thread_spawner)
        self.tc.grid(row=0, column=0, sticky='nsew')
        self.animation_register = []
        for tab in self.tc.tabs:
            # Tabs without an animation_register are skipped with a warning.
            try:
                self.animation_register.extend(self.tc.tabs[tab].animation_register)
            except Exception as e:
                print(('error: %s' % e))
|
class TabController(tk.Frame):
    """Hosts the three workflow tabs (Initialize -> Train -> Evaluate) plus a
    canvas-based menu strip; ``show_frame`` raises a tab and highlights its
    menu entry."""
    def __init__(self, master, global_wd, global_thread_spawner, *args, **kwargs):
        super().__init__(*args, master=master, **kwargs)
        self.global_wd = global_wd
        self.global_thread_spawner = global_thread_spawner
        container = tk.Frame(master=self, width=800, height=750, bg=BODY_COLOR)
        container.grid(row=1, sticky='nsew')
        # Keep the fixed size; don't shrink to fit children.
        container.grid_propagate(0)
        self.tabs = {}
        # Prev/next wiring for each tab's header navigation buttons.
        page_links = {InitializeTab: {'prev': None, 'next': TrainTab}, TrainTab: {'prev': InitializeTab, 'next': EvaluateTab}, EvaluateTab: {'prev': TrainTab, 'next': None}}
        self.tab_list = [InitializeTab, TrainTab, EvaluateTab]
        for F in self.tab_list:
            frame = F(parent=container, controller=self, global_wd=self.global_wd, global_thread_spawner=self.global_thread_spawner, prev_=page_links[F]['prev'], next_=page_links[F]['next'])
            frame.config({'bg': BODY_COLOR})
            self.tabs[F] = frame
            # All tabs share cell (0, 0); tkraise switches between them.
            frame.grid(row=0, column=0, sticky='nsew')
        self._create_menu()
        # TrainTab pulls its build configuration from the Initialize tab.
        self.tabs[TrainTab].init_page = self.tabs[InitializeTab]
        self._register_animations()
        self.show_frame(InitializeTab)
    def _create_menu(self):
        """Draw the icon+text menu strip; one canvas per tab so the active
        tab can be highlighted by background color."""
        self.header = tk.Frame(master=self, width=800, height=50, bg=MENU_COLOR)
        self.header.grid(row=0, sticky='nwe')
        self.header.grid_columnconfigure([0, 1, 2], weight=1)
        self.header.grid_propagate(0)
        img_list = [resource_filename('amber.resources', ('GUI/' + x)) for x in ('init.png', 'run.png', 'eval.png')]
        canvas_list = []
        image_list = []
        text_ = ['Initialize', 'Train', 'Evaluate']
        for i in range(len(img_list)):
            assert os.path.isfile(img_list[i])
            canvas = tk.Canvas(self.header, width=150, height=50, bd=0, highlightthickness=0, bg=MENU_COLOR)
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10
            # (use Image.LANCZOS / Image.Resampling.LANCZOS) — confirm the
            # pinned Pillow version.
            image = Image.open(img_list[i]).resize((40, 40), Image.ANTIALIAS)
            img = ImageTk.PhotoImage(image)
            # Keep a reference so tk doesn't garbage-collect the image.
            image_list.append(img)
            canvas.create_image((5, 5), anchor='nw', image=img)
            canvas.create_text((60, 20), anchor='nw', font=LARGE_FONT, text=text_[i])
            canvas_list.append(canvas)
            canvas.grid(row=0, column=i, sticky='we')
        self.canvases = {self.tab_list[i]: canvas_list[i] for i in range(len(self.tab_list))}
        self.image_list = image_list
    def _register_animations(self):
        """Collect (figure, animate_fn) pairs from all tabs for the caller to
        drive (e.g. via matplotlib FuncAnimation)."""
        self.animation_register = []
        for F in self.tabs:
            for (f, animate) in self.tabs[F].animation_register:
                self.animation_register.append((f, animate))
    def show_frame(self, cont):
        """Raise tab class ``cont`` and highlight its menu canvas."""
        frame = self.tabs[cont]
        frame.tkraise()
        for t in self.canvases:
            canvas = self.canvases[t]
            if (canvas == self.canvases[cont]):
                canvas.config(bg=BODY_COLOR)
            else:
                canvas.config(bg=MENU_COLOR)
|
def beautify_status_update(tab):
    """Build the status-bar refresh callbacks for a TrainTab.

    Returns a dict mapping status-bar keys ('cpu_status', 'run_status',
    'ram', 'gpu_status') to zero-argument functions that refresh the
    corresponding widget in ``tab.var_dict`` and re-color it by load.
    """
    var_dict = tab.var_dict
    def colorify(p, widget=None):
        # Map a percentage to a severity color; None means "no data".
        # NOTE(review): negative p (unknown run-status codes use p = -1)
        # falls into the `p < 30` branch and is colored green; 'black' was
        # presumably intended — confirm.
        if (p is None):
            color = 'grey'
        elif (p < 30):
            color = 'green'
        elif (30 <= p < 70):
            color = 'orange'
        elif (70 <= p < 100):
            color = 'red'
        else:
            color = 'black'
        if (widget is None):
            return color
        else:
            widget.config({'fg': color})
    def get_cpu_usage():
        # System-wide CPU percentage via psutil.
        p = psutil.cpu_percent()
        var_dict['cpu_status'][0].set(p)
        colorify(p, var_dict['cpu_status'][2])
    def get_ram():
        # Used RAM in GiB plus percentage, e.g. "12G; 75.3".
        d = psutil.virtual_memory()._asdict()
        (usage, p) = (d['used'], d['percent'])
        usage = round((usage / (1024.0 ** 3)), 1)
        var_dict['ram'][0].set(('%iG; %s' % (usage, p)))
        colorify(p, var_dict['ram'][2])
    def get_gpu_usage():
        # One line per GPU, colored individually via Text tags.
        if NUM_GPUS:
            new_q = gpustat.GPUStatCollection.new_query().jsonify()
            var_dict['gpu_status'][0].delete(1.0, tk.END)
            color_list = []
            for gpu_d in new_q['gpus']:
                s = '{index}: {util}; {mem_used}/{mem_total}g \n'.format(index=gpu_d['index'], util=gpu_d['utilization.gpu'], mem_used=round((gpu_d['memory.used'] / 1024.0), 1), mem_total=round((gpu_d['memory.total'] / 1024.0), 1))
                color_list.append(colorify(gpu_d['utilization.gpu']))
                var_dict['gpu_status'][0].insert(tk.END, s)
            for i in range(len(color_list)):
                color = color_list[i]
                # Tag line i+1 (Text lines are 1-based) with its own color.
                var_dict['gpu_status'][0].tag_add(i, ('%i.0' % (i + 1)), ('%i.end' % (i + 1)))
                var_dict['gpu_status'][0].tag_config(i, foreground=color)
    def get_run_status():
        # Translate the training subprocess poll() result into a label.
        if (tab.bn is None):
            var_dict['run_status'][0].set('Waiting')
            p = None
        elif (tab.bn.poll() is None):
            var_dict['run_status'][0].set('Running')
            p = 30
        elif (tab.bn.poll() == 0):
            var_dict['run_status'][0].set('Finished')
            p = 0
        elif (tab.bn.poll() == 1):
            var_dict['run_status'][0].set('Stopped')
            p = 90
        else:
            var_dict['run_status'][0].set(('Code:%s' % tab.bn.poll()))
            p = (- 1)
        colorify(p, var_dict['run_status'][2])
    func_map = {'cpu_status': get_cpu_usage, 'run_status': get_run_status, 'ram': get_ram, 'gpu_status': get_gpu_usage}
    return func_map
|
class TrainTab(tk.Frame):
    """Training tab: Build serializes the Initialize tab's configuration to
    a pickle; Run spawns a subprocess executing BioNAS on it; Stop sends
    SIGINT. A status bar (CPU/RAM/GPU/run state) refreshes periodically and
    a matplotlib canvas animates the reward moving baseline.
    """
    def __init__(self, parent, controller, global_wd, global_thread_spawner=None, prev_=None, next_=None):
        tk.Frame.__init__(self, master=parent, bg=BODY_COLOR)
        self.global_wd = global_wd
        self.global_thread_spawner = global_thread_spawner
        # Set by TabController to the InitializeTab instance after creation.
        self.init_page = None
        # Popen-like handle of the running BioNAS subprocess (None = idle).
        self.bn = None
        self.var_dict = {}
        self._create_header(controller, prev_, next_)
        self._create_left_column()
        self._create_demo_column()
        # `f` is presumably the module-level matplotlib figure shared with
        # _create_demo_column — TODO confirm.
        self.animation_register = [(f, self._animate_r_bias)]
        self.has_run = False
        self.status_bar_fn_map = beautify_status_update(self)
        self._update_status()
    def _create_header(self, controller, prev_, next_):
        """Navigation bar linking to the neighbouring tabs."""
        self.header = tk.Frame(master=self, width=800, height=20, bg=BODY_COLOR)
        self.header.grid(row=0, columnspan=5, sticky='nw')
        button2 = tk.Button(self.header, text='Evaluate ->', command=(lambda : controller.show_frame(next_)), bg=BTN_BG)
        button2.grid(row=0, column=2, sticky='w')
        button1 = tk.Button(self.header, text='<- Initialize', command=(lambda : controller.show_frame(prev_)), bg=BTN_BG)
        button1.grid(row=0, column=1, sticky='w')
    def _create_left_column(self):
        '\n TODO:\n - integrate with gpustat for monitoring GPU usage\n - update labels with dynamic statistics, e.g. entropy, loss/knowledge, learning rate, etc.\n Returns:\n None\n '
        self.left_column = tk.Frame(master=self, width=300, height=600, bg=SIDEBAR_COLOR)
        self.left_column.grid(row=1, column=0, columnspan=2, sticky='nw')
        self.left_column.grid_rowconfigure(0, weight=1)
        label = tk.Label(self.left_column, text='Train', font=LARGE_FONT, bg=BODY_COLOR, justify=tk.CENTER)
        label.grid(row=0, column=0, columnspan=2, sticky='we')
        button1 = tk.Button(self.left_column, text='Build', command=self._build, bg=BTN_BG)
        button1.grid(row=6, column=0, columnspan=2, sticky='we')
        button2 = tk.Button(self.left_column, text='Run', command=self._run, bg=BTN_BG)
        button2.grid(row=7, column=0, columnspan=2, sticky='we')
        button3 = tk.Button(self.left_column, text='Stop', command=self._stop, bg=BTN_BG)
        button3.grid(row=8, column=0, columnspan=2, sticky='we')
        # Status bar widgets come from the STATUS_BAR_LAYOUT spec; the extra
        # widget reference in the tuple lets colorify() recolor them.
        status_bar = tk.Frame(master=self.left_column, width=300, bg=BODY_COLOR)
        status_bar.grid(row=9, column=0, columnspan=2, sticky='nwe')
        var_dict = {}
        for (k, v) in STATUS_BAR_LAYOUT.items():
            (str_var, widget, btn_var) = create_widget(v['value'], status_bar)
            widget.grid(**v['wpos'])
            if (str_var is not None):
                var_dict[k] = (str_var, btn_var, widget)
                label = tk.Label(status_bar, text=(k + ': '), bg=BODY_COLOR)
                label.grid(**v['lpos'])
            if ('default' in v):
                if (type(str_var) is tk.Text):
                    str_var.delete(1.0, 'end')
                    str_var.insert(1.0, v['default'])
                else:
                    str_var.set(v['default'])
        self.var_dict.update(var_dict)
    def _create_demo_column(self):
        """Embed the shared matplotlib figure `f` on the right-hand side."""
        self.right_column = tk.Frame(master=self, width=500, height=600, bg=BODY_COLOR)
        self.right_column.grid(row=1, column=2, columnspan=3, sticky='nswe')
        canvas = FigureCanvasTkAgg(f, self.right_column)
        canvas.draw()
        canvas.get_tk_widget().grid(row=0, column=0, columnspan=3, rowspan=1, sticky='nswe', padx=5, pady=5)
    def _animate_r_bias(self, i):
        """Animation callback: re-plot the reward moving baseline parsed from
        <wd>/buffers.txt ('Episode:<x>\\t...\\t<label>:<y>' lines). Missing
        file is tolerated (training may not have started yet)."""
        a.clear()
        a.set_xlim((0, 50))
        a.set_title('Reward Moving Baseline')
        try:
            xList = []
            yList = []
            with open(os.path.join(self.global_wd.get(), 'buffers.txt'), 'r') as fh:
                for line in fh:
                    if (not line.startswith('Episode')):
                        continue
                    ele = line.strip().split('\t')
                    x = ele[0].split(':')[1]
                    y = ele[2].split(':')[1]
                    xList.append(float(x))
                    yList.append(float(y))
            a.plot(xList, yList)
            # Grow the x-axis in increments of 50 episodes.
            a.set_xlim((0, (np.ceil((max(xList) / 50)) * 50)))
        except FileNotFoundError:
            pass
    def _update_status(self):
        """Refresh all status-bar fields, then reschedule itself."""
        for (k, v) in self.var_dict.items():
            if (k in self.status_bar_fn_map):
                self.status_bar_fn_map[k]()
        self.after(REFRESH_INTERVAL, self._update_status)
    def _build(self):
        """Serialize the Initialize tab's configuration to bionas_config.pkl."""
        try:
            # NOTE(review): passes the page object as parse_vars' `verbose`
            # flag (truthy -> verbose printing); `parse_vars()` was
            # presumably intended — confirm.
            var_dict = self.init_page.parse_vars(self.init_page)
        except Exception as e:
            messagebox.showinfo('Error', 'Build was unsuccessful. See command-line output for details.')
            print(('Failed build caused by %s' % e))
            return
        (types, specs) = gui_mapper(var_dict)
        fn = os.path.join(self.global_wd.get(), 'bionas_config.pkl')
        with open(fn, 'wb') as f:
            pickle.dump({'types': types, 'specs': specs}, f)
    def _run(self):
        """Spawn a subprocess that unpickles the config and runs BioNAS."""
        fn = os.path.join(self.global_wd.get(), 'bionas_config.pkl')
        if (not os.path.isfile(fn)):
            messagebox.showinfo('Error', 'BioNAS has not been built; did you click on Build?')
            return
        if self.has_run:
            messagebox.showinfo('Error', 'BioNAS has already been running in background.. please wait')
        else:
            self.has_run = True
            cmd = ['python', '-c', ("import pickle;from BioNAS import BioNAS;d=pickle.load(open('%s','rb'));bn=BioNAS(**d);bn.run()" % fn)]
            self.bn = self.global_thread_spawner(cmd)
    def _stop(self):
        """Interrupt a running subprocess, or reset state if it has exited."""
        if (not os.path.isfile(os.path.join(self.global_wd.get(), 'bionas_config.pkl'))):
            messagebox.showinfo('Error', 'BioNAS has not been built; did you click on Build?')
            return
        if (not self.has_run):
            messagebox.showinfo('Error', 'BioNAS is not running')
        elif (self.bn.poll() is None):
            # Still running: ask it to stop gracefully.
            self.bn.send_signal(signal.SIGINT)
        elif (self.bn.poll() == 0):
            messagebox.showinfo('Info', 'BioNAS finished running')
            self.has_run = False
            self.bn = None
        else:
            self.has_run = False
            # NOTE(review): poll() is non-zero in this branch, so
            # `not self.bn.poll()` is always False and kill() appears
            # unreachable — confirm intended condition.
            if (not self.bn.poll()):
                self.bn.kill()
                messagebox.showinfo('Info', 'BioNAS subprocess killed')
|
class welcome_page(tk.Toplevel):
    """Startup dialog: lets the user pick a working directory before the
    main window is shown. Open validates the directory, retitles the master
    window, deiconifies it and destroys this dialog; Quit exits the app.
    """
    def __init__(self, master, global_wd, *args, **kwargs):
        super().__init__(master=master)
        self.geometry('600x400+500+100')
        self.title('BioNAS - Welcome')
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)
        self.grid_propagate(0)
        # Shared tk.StringVar holding the working directory path.
        self.wd = global_wd
        self.frame = tk.Frame(master=self, width=600, height=400, bg=BODY_COLOR)
        self.frame.grid(sticky='nsew')
        self.frame.grid_propagate(0)
        self.create_left_column()
        self.create_right_column()
    def create_left_column(self):
        """Buttons (Quit/Browse/Open) plus a live display of the chosen wd."""
        self.left_column = tk.Frame(master=self.frame, width=150, height=400, bg=MENU_COLOR)
        self.left_column.grid(row=0, column=0, sticky='w')
        self.left_column.grid_propagate(0)
        self.left_column.grid_columnconfigure(0, weight=1)
        button4 = tk.Button(master=self.left_column, text='Quit', command=self._quit, bg=BTN_BG, width=8)
        button4.grid(row=0)
        button2 = tk.Button(master=self.left_column, text='Browse', command=self._ask_folder, bg=BTN_BG, width=8)
        button2.grid(row=1)
        button3 = tk.Button(master=self.left_column, text='Open', command=self._confirm, bg=BTN_BG, width=8)
        button3.grid(row=2)
        sep = ttk.Separator(master=self.left_column, orient=tk.HORIZONTAL)
        sep.grid(row=3, sticky='ew', pady=10)
        lbl1 = tk.Label(master=self.left_column, text='Working Directory:', fg='white', bg=self.left_column['bg'])
        lbl1.grid(row=4, pady=10)
        # textvariable keeps the label in sync with self.wd automatically.
        lbl2 = tk.Label(master=self.left_column, textvariable=self.wd, fg='white', bg=self.left_column['bg'], justify=tk.LEFT)
        lbl2.grid(row=5, sticky='we')
    def _ask_folder(self):
        """Let the user browse for a directory and store the choice."""
        filename = filedialog.askdirectory()
        self.wd.set(filename)
        print(filename)
    def _confirm(self):
        """Validate the chosen directory; on success hand control to the
        (previously withdrawn) main window and close this dialog."""
        wd_now = self.wd.get()
        if ((wd_now == 'None') or (not os.path.isdir(wd_now))):
            print('bad directory')
            messagebox.showinfo('I/O Error', ('Bad working directory: %s' % wd_now))
        else:
            self.master.title(('BioNAS - Main - %s' % wd_now))
            self.master.deiconify()
            self.destroy()
    def _quit(self):
        """Terminate the whole application (master mainloop and window)."""
        self.master.quit()
        self.master.destroy()
    def create_right_column(self):
        """ASCII-art logo and welcome message panel."""
        self.right_column = tk.Frame(master=self.frame, width=450, height=400, bg='white')
        self.right_column.grid(row=0, column=1, sticky='nswe')
        self.right_column.pack_propagate(0)
        logo_label = tk.Label(self.right_column, text=LOGO['ascii'], justify=tk.LEFT, font=LOGO['font'], fg=LOGO['color'], bg=self.right_column['bg'])
        logo_label.grid(row=0, sticky='we', padx=10, pady=10)
        label = tk.Label(self.right_column, text=WELCOME_MSG, font=LARGE_FONT, justify=tk.LEFT, fg='black', bg=self.right_column['bg'])
        label.grid(row=1, sticky='nswe', padx=10, pady=10)
|
class LabelSeparator(tk.Frame):
    """A horizontal rule with a text label drawn centered on top of it."""
    def __init__(self, parent, text='', width='', *args):
        # ``width`` is accepted for signature compatibility but unused.
        tk.Frame.__init__(self, parent, *args, bg=BODY_COLOR)
        for configure in (self.grid_columnconfigure, self.grid_rowconfigure):
            configure(0, weight=1)
        sep = ttk.Separator(self, orient=tk.HORIZONTAL)
        sep.grid(row=0, column=0, sticky='ew')
        lbl = tk.Label(self, text=text, bg=BODY_COLOR)
        # Gridded after the separator so the label is drawn above it.
        lbl.grid(row=0, column=0)
        self.separator = sep
        self.label = lbl
|
def create_widget(arg, master):
    """Translate a layout spec into a (value_var, widget, display_var) triplet.

    Spec forms:
        list            -> OptionMenu; a 'Custom..' choice opens a file dialog
        '[w,h]'         -> tk.Text of that size (returned in all three slots)
        '{w,1}'         -> read-only tk.Entry backed by a StringVar
        '--title--'     -> decorative LabelSeparator (value/display are None)
        'Custom..'      -> file-chooser Button showing a truncated basename

    Raises:
        Exception: for any spec not matching the forms above.
    """
    if ((type(arg) is list) and (len(arg) > 0)):
        value_var = tk.StringVar(master)
        value_var.set(arg[0])
        display_var = tk.StringVar(master)
        display_var.set(arg[0])
        def on_select(_selection):
            # 'Custom..' routes through a file dialog; any other choice is
            # copied verbatim into the value variable.
            if (display_var.get() == 'Custom..'):
                fp = filedialog.askopenfilename()
                value_var.set(fp)
                display_var.set(os.path.basename(fp)[:15])
            else:
                value_var.set(display_var.get())
        menu = tk.OptionMenu(master, display_var, *arg, command=on_select)
        menu.config(width=10, bg=BODY_COLOR, justify=tk.LEFT)
        return (value_var, menu, display_var)
    if (type(arg) is str):
        if (arg.startswith('[') and arg.endswith(']')):
            cols, rows = arg.strip('[]').split(',')
            text = tk.Text(master, width=int(cols), height=int(rows), bg=TEXT_BG)
            return (text, text, text)
        if (arg.startswith('{') and arg.endswith('}')):
            cols, rows = arg.strip('{}').split(',')
            assert (int(rows) == 1), 'tk.Entry widget only has height=1'
            value_var = tk.StringVar(master, value='')
            entry = tk.Entry(master, width=int(cols), textvariable=value_var, state='readonly', bg=TEXT_BG)
            return (value_var, entry, value_var)
        if arg.startswith('--'):
            return (None, LabelSeparator(parent=master, text=arg.strip('-')), None)
        if (arg == 'Custom..'):
            value_var = tk.StringVar(master, value='None')
            display_var = tk.StringVar(master, value='Choose..')
            def choose_file():
                fp = filedialog.askopenfilename()
                value_var.set(fp)
                display_var.set(os.path.basename(fp)[:15])
            btn = tk.Button(master, textvariable=display_var, command=choose_file, bg=BTN_BG)
            return (value_var, btn, display_var)
    raise Exception(('Failed to create widget: %s' % arg))
|
def load_full_model(modelPath):
    """Deserialize the full Keras model at ``modelPath``, registering the
    project-wide ``custom_objects`` so custom layers/metrics resolve."""
    return load_model(modelPath, custom_objects=custom_objects)
|
def get_models_from_hist_by_load(hist_idx, hist):
    """Load the fully-saved best model for each requested history row.

    Args:
        hist_idx: iterable of integer row indices into ``hist``.
        hist: history table with ``dir`` and ``ID`` columns.

    Returns:
        dict: row index -> loaded model.
    """
    return {
        idx: load_full_model('%s/weights/trial_%i/full_bestmodel.h5'
                             % (hist.iloc[idx].dir, hist.iloc[idx].ID))
        for idx in hist_idx
    }
|
def get_best_model(state_space, controller, working_directory):
    """Given a controller and a state_space, find the best model states in its
    state space.

    Returns:
        dict: a dict of conditions for selected best model(s)

    NOTE: not yet implemented; currently always returns None.
    """
    return None
|
def get_hist_index_by_conditions(condition_dict, hist, complementary=False):
    """Get a set of indices for models that satisfy certain conditions from
    a hist file.

    Args:
        condition_dict: maps a ``hist`` column name to an
            ``(operator_string, value)`` pair, e.g. ``{'loss': ('<', 0.5)}``.
        hist: table-like object supporting column access by name.
        complementary: if True, return rows failing at least one condition.

    Returns:
        np.ndarray: integer indices of the matching rows.
    """
    comparators = {
        '==': (lambda lhs, rhs: (lhs == rhs)),
        '!=': (lambda lhs, rhs: (lhs != rhs)),
        '>': (lambda lhs, rhs: (lhs > rhs)),
        '>=': (lambda lhs, rhs: (lhs >= rhs)),
        '<': (lambda lhs, rhs: (lhs < rhs)),
        '<=': (lambda lhs, rhs: (lhs <= rhs)),
    }
    masks = [comparators[op](hist[col], target)
             for (col, (op, target)) in condition_dict.items()]
    combined = np.all(np.array(masks), axis=0)
    if complementary:
        combined = (~ combined)
    return np.where(combined)[0]
|
def get_models_from_hist(hist_idx, hist, input_state, output_state, state_space, model_compile_dict):
    """Given a set of indices, rebuild a dictionary of models from a history
    file and load each model's best weights.

    Returns:
        dict: row index -> compiled model with weights loaded.
    """
    models = {}
    for idx in hist_idx:
        row = hist.iloc[idx]
        # Architecture columns are named L1..Ln; the other 5 columns are
        # bookkeeping (ID, dir, metrics, ...).
        n_layers = (hist.shape[1] - 5)
        arch_strings = [row[('L%i' % (layer + 1))] for layer in range(n_layers)]
        model = model_fn.build_sequential_model_from_string(
            arch_strings, input_state, output_state, state_space, model_compile_dict)
        model.load_weights(('%s/weights/trial_%i/bestmodel.h5' % (row.dir, row.ID)))
        models[idx] = model
    return models
|