def get_multi_gpu_models_from_hist(hist_idx, hist, input_state, output_state, state_space, model_compile_dict):
'Given a set of indices, build a dictionary of\n models from a history file\n '
model_dict = {}
for idx in hist_idx:
model_state_str = [hist.iloc[idx][('L%i' % (i + 1))] for i in range((hist.shape[1] - 5))]
model_dict[idx] = model_fn.build_multi_gpu_sequential_model_from_string(model_state_str, input_state, output_state, state_space, model_compile_dict)
model_dict[idx].load_weights(('%s/weights/trial_%i/bestmodel.h5' % (hist.iloc[idx].dir, hist.iloc[idx].ID)))
return model_dict
def match_quantity(q, p, num_slice=10, num_sample_per_slice=None, replace=False, random_state=777):
'Sample from p a new distribution that matches the distribution of q\n\n Returns:\n np.array: sampled indices in p\n '
np.random.seed(random_state)
if (not num_sample_per_slice):
num_sample_per_slice = int(np.ceil((len(q) / num_slice)))
q_quantiles = np.percentile(q, q=np.arange(0, (100 + (100 / num_slice)), (100 / num_slice)))
new_p_idx = []
for (q_s, q_e) in zip(q_quantiles[:(- 1)], q_quantiles[1:]):
t = np.random.choice(np.where(((p >= q_s) & (p <= q_e)))[0], num_sample_per_slice, replace=replace)
new_p_idx.extend(t)
new_p_idx = np.array(new_p_idx)
return new_p_idx
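# A minimal usage sketch for match_quantity, added for illustration and not part of the
# original source; the arrays and the helper name below are hypothetical.
def _match_quantity_example():
    q = np.random.normal(loc=0.0, scale=1.0, size=1000)
    p = np.random.normal(loc=0.5, scale=2.0, size=5000)
    idx = match_quantity(q, p, num_slice=10, replace=True)
    # p[idx] should now roughly follow the quantiles of q
    return p[idx]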
class InfoConstraint(Constraint):
def __call__(self, w):
w = (w * K.cast(K.greater_equal(w, 0), K.floatx()))
w = (w * (2 / K.maximum(K.sum(w, axis=1, keepdims=True), 2)))
return w
class NegativeConstraint(Constraint):
def __call__(self, w):
w = (w * K.cast(K.less_equal(w, 0), K.floatx()))
return w
def filter_reg(w, lambda_filter, lambda_l1):
filter_penalty = (lambda_filter * K.sum(K.l2_normalize(K.sum(w, axis=0), axis=1)))
weight_penalty = (lambda_l1 * K.sum(K.abs(w)))
return (filter_penalty + weight_penalty)
def pos_reg(w, lambda_pos, filter_len):
location_lambda = (K.cast(K.concatenate([K.arange((filter_len / 2), stop=0, step=(- 1)), K.arange(start=1, stop=((filter_len / 2) + 1))]), 'float32') * (lambda_pos / (filter_len / 2)))
location_penalty = K.sum((location_lambda * K.sum(K.abs(w), axis=(0, 2, 3))))
return location_penalty
def total_reg(w, lambda_filter, lambda_l1, lambda_pos, filter_len):
return (filter_reg(w, lambda_filter, lambda_l1) + pos_reg(w, lambda_pos, filter_len))
def Layer_deNovo(filters, kernel_size, strides=1, padding='valid', activation='sigmoid', lambda_pos=0.003, lambda_l1=0.003, lambda_filter=1e-08, name='denovo'):
return Conv2D(filters, (4, kernel_size), strides=strides, padding=padding, activation=activation, kernel_initializer=normal(0, 0.5), bias_initializer='zeros', kernel_regularizer=(lambda w: total_reg(w, lambda_filter, lambda_l1, lambda_pos, kernel_size)), kernel_constraint=InfoConstraint(), bias_constraint=NegativeConstraint(), name=name)
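# A minimal usage sketch for Layer_deNovo, added for illustration and not part of the
# original source. It assumes a one-hot DNA input already oriented as (4, seq_len, 1),
# which is the orientation produced by the 'denovo' branch of get_layer further below.
def _layer_denovo_example():
    inp = Input(shape=(4, 100, 1))
    x = Layer_deNovo(filters=8, kernel_size=12, name='denovo_example')(inp)
    # with 'valid' padding, x has shape (batch, 1, 100 - 12 + 1, 8)
    return Model(inputs=inp, outputs=x)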
class DenovoConvMotif(Layer):
'\n De Novo motif learning Convolutional layers, as described in\n https://github.com/mPloenzke/learnMotifs/blob/master/R/layer_denovoMotif.R\n by Matthew Ploenzke, Rafael Irizarry. Accessed 2018.12.05\n '
def __init__(self, output_dim, filters, filter_len, lambda_pos=0, lambda_filter=0, lambda_l1=0, lambda_offset=0, strides=(1, 1), padding='valid', activation='sigmoid', use_bias=True, kernel_initializer=None, bias_initializer='zeros', kernel_regularizer=None, kernel_constraint=None, bias_constraint=None, input_shape=None, name='deNovo_conv', trainable=None, **kwargs):
super(DenovoConvMotif, self).__init__(**kwargs)
raise NotImplementedError()
def build(self, input_shape):
'\n R-code:\n .. code-block:: R\n lambda_filter <<- lambda_filter\n lambda_l1 <<- lambda_l1\n lambda_pos <<- lambda_pos\n filter_len <<- filter_len\n keras::create_layer(keras:::keras$layers$Conv2D, object, list(filters = as.integer(filters),\n kernel_size = keras:::as_integer_tuple(c(4,filter_len)), strides = keras:::as_integer_tuple(strides),\n padding = padding, data_format = NULL, dilation_rate = c(1L, 1L),\n activation = activation, use_bias = use_bias, kernel_initializer = kernel_initializer,\n bias_initializer = bias_initializer, kernel_regularizer = kernel_regularizer,\n bias_regularizer = keras::regularizer_l1(l=lambda_offset), activity_regularizer = NULL,\n kernel_constraint = kernel_constraint, bias_constraint = bias_constraint,\n input_shape = keras:::normalize_shape(input_shape), batch_input_shape = keras:::normalize_shape(NULL),\n batch_size = keras:::as_nullable_integer(NULL), dtype = NULL,\n name = name, trainable = trainable, weights = NULL))\n '
self.built = True
def sparsek_vec(x):
convmap = x
shape = tf.shape(convmap)
nb = shape[0]
nl = shape[1]
tk = tf.cast((0.2 * tf.cast(nl, tf.float32)), tf.int32)
convmapr = tf.reshape(convmap, tf.stack([nb, (- 1)]))
(th, _) = tf.nn.top_k(convmapr, tk)
th1 = tf.slice(th, [0, (tk - 1)], [(- 1), 1])
thr = tf.reshape(th1, tf.stack([nb, 1]))
drop = tf.where((convmap < thr), tf.zeros([nb, nl], tf.float32), tf.ones([nb, nl], tf.float32))
convmap = (convmap * drop)
return convmap
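# A minimal sketch of what sparsek_vec computes, added for illustration and not part of
# the original source: it keeps roughly the top 20% of activations per sample and zeroes
# the rest. The constant input and the session below are hypothetical; TF1 graph mode is
# assumed, consistent with the rest of this module.
def _sparsek_vec_example():
    a = tf.constant(np.random.rand(3, 10).astype(np.float32))
    sparse_a = sparsek_vec(a)
    with tf.compat.v1.Session() as sess:
        out = sess.run(sparse_a)
    # each row of `out` keeps its 2 largest entries and zeroes the other 8
    return out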
def get_features_in_block(spatial_outputs, f_kmean_assign):
n_clusters = (np.max(f_kmean_assign) + 1)
block_ops = []
for c in range(n_clusters):
i = np.where((f_kmean_assign == c))[0]
block_ops.append(tf.gather(spatial_outputs, indices=i, axis=(- 1), batch_dims=0))
return block_ops
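# A minimal sketch of get_features_in_block, added for illustration and not part of the
# original source: channels of a feature map are grouped into blocks according to a
# cluster assignment. The tensor and the assignment vector below are hypothetical.
def _features_in_block_example():
    spatial = tf.constant(np.random.rand(2, 10, 6).astype(np.float32))
    f_kmean_assign = np.array([0, 0, 1, 1, 2, 2])
    blocks = get_features_in_block(spatial, f_kmean_assign)
    # blocks is a list of 3 tensors, each of shape (2, 10, 2)
    return blocks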
class CnnFeatureModel():
'\n Convert the last Conv layer of a CNN model into features for a DNN model.\n Keep all tensors for future operations.\n '
def __init__(self, base_model, session, feature_assign_fn, feature_map_orientation='channels_last', x=None, y=None, batch_size=128, target_layer=None, trainable=None, name='CnnFeatureModel'):
self.name = name
layer_dict = {l.name: l for l in base_model.layers}
base_model.trainable = (trainable or False)
if (target_layer is None):
target_layer = sorted([k for k in layer_dict if k.startswith('conv')])[(- 1)]
self.session = session
self.base_model = base_model
self.x_inputs = base_model.inputs
self.spatial_outputs = layer_dict[target_layer].output
self.total_feature_num = np.prod(self.spatial_outputs.shape[1:]).value
if (feature_map_orientation == 'channels_last'):
self.orient = [0, 2, 1]
elif (feature_map_orientation == 'channels_first'):
self.orient = [0, 1, 2]
else:
raise Exception(('cannot understand feature_map_orientation: %s' % feature_map_orientation))
self.outputs = tf.reshape(tf.transpose(self.spatial_outputs, self.orient), [(- 1), self.total_feature_num])
self.load_feature_blocks(feature_assign_fn)
self.batch_size = None
self.data_gen = None
self.x_it = None
self.y_it = None
self.x_ph = None
self.y_ph = None
self.pseudo_inputs_pipe = None
def get_data_pipeline_feedable(self, label_shapes, batch_size=None):
if (batch_size is None):
batch_size = 32
self.x_ph = [tf.placeholder(shape=self.base_model.inputs[i].shape, dtype=np.float32, name=('base_input_%i' % i)) for i in range(len(self.base_model.inputs))]
self.y_ph = [tf.placeholder(shape=label_shapes[i], dtype=np.float32, name=('base_output_%i' % i)) for i in range(len(label_shapes))]
self.batch_size = batch_size
dataset = tf.data.Dataset.from_tensor_slices(tuple((self.x_ph + self.y_ph)))
self.data_gen = dataset.repeat().shuffle(self.batch_size).batch(self.batch_size).make_initializable_iterator()
next_ele = self.data_gen.get_next()
self.x_it = list(next_ele[:len(self.x_ph)])
self.y_it = list(next_ele[len(self.x_ph):])
self.base_model.layers.pop(0)
spatial_outputs_pipe = self.base_model(self.x_it)
block_ops = get_features_in_block(spatial_outputs_pipe, self.f_assign)
self.pseudo_inputs_pipe = tf.concat([tf.reshape(tf.transpose(x, self.orient), [(- 1), np.prod(x.shape[1:]).value]) for x in block_ops], axis=1)
def predict(self, x_, keep_spatial=True):
if (type(x_) is not list):
x_ = [x_]
if keep_spatial:
return self.session.run(self.spatial_outputs, feed_dict={self.x_inputs[i]: x_[i] for i in range(len(x_))})
else:
return self.session.run(self.outputs, feed_dict={self.x_inputs[i]: x_[i] for i in range(len(x_))})
def load_feature_blocks(self, feature_assign_fn):
f_assign = np.load(feature_assign_fn)
block_ops = get_features_in_block(self.spatial_outputs, f_assign)
self.input_blocks = [tf.reshape(tf.transpose(x, self.orient), [(- 1), np.prod(x.shape[1:]).value]) for x in block_ops]
self.pseudo_inputs = tf.concat(self.input_blocks, axis=1)
self.input_node_for_nas = [State('input', shape=(self.input_blocks[i].shape[1].value,), name=('Input_%i' % i)) for i in range(len(block_ops))]
self.f_assign = f_assign
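# A hedged construction sketch for CnnFeatureModel, added for illustration and not part
# of the original source. `trained_cnn`, `sess` and 'feature_clusters.npy' are
# hypothetical: the .npy file is expected to hold a per-channel cluster assignment as
# consumed by load_feature_blocks, and the CNN's conv layers are assumed to be named
# with a 'conv' prefix so the default target_layer can be located.
def _cnn_feature_model_example(trained_cnn, sess):
    feat_model = CnnFeatureModel(base_model=trained_cnn, session=sess,
                                 feature_assign_fn='feature_clusters.npy')
    # feat_model.input_node_for_nas can then serve as input_node for a downstream DAG
    return feat_model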
class MultiIOArchitecture():
def __init__(self, num_layers, num_inputs, num_outputs):
self.num_layers = num_layers
self.num_inputs = num_inputs
self.num_outputs = num_outputs
def decode(self, arc_seq):
start_idx = 0
operations = []
inputs = []
skips = []
for layer_id in range(self.num_layers):
operation = arc_seq[start_idx]
start_idx += 1
inp = arc_seq[start_idx:(start_idx + self.num_inputs)]
if (layer_id > 0):
skip = arc_seq[(start_idx + self.num_inputs):((start_idx + self.num_inputs) + layer_id)]
skips.append(skip)
operations.append(operation)
inputs.append(inp)
start_idx += (self.num_inputs + layer_id)
inputs = np.asarray(inputs)
outputs = np.asarray(arc_seq[start_idx:]).reshape(((- 1), self.num_layers))
return (operations, inputs, skips, outputs)
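# A worked decoding sketch for MultiIOArchitecture, added for illustration and not part
# of the original source. With 2 layers, 2 inputs and 1 output, the token layout is
# [op0, inp0_0, inp0_1, op1, inp1_0, inp1_1, skip1_0, out0_L0, out0_L1].
def _multi_io_decode_example():
    arch = MultiIOArchitecture(num_layers=2, num_inputs=2, num_outputs=1)
    operations, inputs, skips, outputs = arch.decode([0, 1, 0, 1, 0, 1, 1, 1, 0])
    # operations == [0, 1]; inputs has shape (2, 2); skips == [[1]]; outputs == [[1, 0]]
    return operations, inputs, skips, outputs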
class ResConvNetArchitecture():
def __init__(self, model_space):
'ResConvNetArchitecture is a class for decoding and encoding neural architectures of convolutional neural\n networks with residual connections\n\n Parameters\n ----------\n model_space : amber.architect.ModelSpace\n The model space which architectures are being sampled from\n '
self.model_space = model_space
self._num_layers = len(self.model_space)
def decode(self, arc_seq):
'Decode a sequence of architecture tokens into operations and res-connections\n '
start_idx = 0
operations = []
res_con = []
for layer_id in range(self._num_layers):
operations.append(arc_seq[start_idx])
if (layer_id > 0):
res_con.append(arc_seq[(start_idx + 1):((start_idx + layer_id) + 1)])
start_idx += (layer_id + 1)
return (operations, res_con)
def encode(self, operations, res_con):
'Encode operations and residual connections to a sequence of architecture tokens\n\n This is the inverse function for `decode`\n\n Parameters\n ----------\n operations : list\n A list of integers for categorically-encoded operations\n res_con : list\n A list of list where each entry is a binary-encoded residual connections\n '
operations_ = list(operations)
arc_seq = [operations_.pop(0)]
for (op, res) in zip(operations_, res_con):
arc_seq.append(op)
arc_seq.extend(res)
return arc_seq
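# A round-trip sketch for ResConvNetArchitecture, added for illustration and not part of
# the original source. It assumes `model_space` has 3 layers, so the token layout is
# [op0, op1, res1_0, op2, res2_0, res2_1].
def _resconv_arch_example(model_space):
    arch = ResConvNetArchitecture(model_space=model_space)
    ops, res = arch.decode([0, 1, 1, 2, 0, 1])
    # ops == [0, 1, 2]; res == [[1], [0, 1]]
    assert arch.encode(ops, res) == [0, 1, 1, 2, 0, 1]
    return ops, res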
def get_dag(arg):
"Getter method for getting a DAG class from a string\n\n DAG refers to the underlying tensor computation graphs for child models. Whenever possible, we prefer to use Keras\n Model API to get the job done. For ENAS, the parameter-sharing scheme is implemented by tensorflow.\n\n Parameters\n ----------\n arg : str or callable\n return the DAG constructor corresponding to that identifier; if is callable, assume it's a DAG constructor\n already, do nothing and return it\n\n Returns\n -------\n callable\n A DAG constructor\n "
if (arg is None):
return None
elif (type(arg) is str):
if (arg.lower() == 'dag'):
return DAG
elif (arg.lower() == 'inputblockdag'):
return InputBlockDAG
elif (arg.lower() == 'inputblockauxlossdag'):
return InputBlockAuxLossDAG
elif (arg.lower() == 'enasanndag'):
return EnasAnnDAG
elif (arg.lower() == 'enasconv1ddag'):
return EnasConv1dDAG
elif (arg == 'EnasConv1DwDataDescrption'):
return EnasConv1DwDataDescrption
elif callable(arg):
return arg
else:
raise ValueError('Could not understand the DAG func:', arg)
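# A minimal illustration of get_dag, added and not part of the original source: string
# identifiers are matched case-insensitively (except 'EnasConv1DwDataDescrption'), and
# callables are passed through unchanged.
def _get_dag_example():
    assert get_dag('enasconv1ddag') is EnasConv1dDAG
    assert get_dag(DAG) is DAG
    return get_dag('InputBlockDAG')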
def get_layer(x, state, with_bn=False):
'Getter method for a Keras layer, including native Keras implementation and custom layers that are not included in\n Keras.\n\n Parameters\n ----------\n x : tf.keras.layers or None\n The input Keras layer\n state : amber.architect.Operation\n The target layer to be built\n with_bn : bool, optional\n If true, add batch normalization layers before activation\n\n Returns\n -------\n x : tf.keras.layers\n The built target layer connected to input x\n '
if (state.Layer_type == 'dense'):
if (with_bn is True):
actv_fn = state.Layer_attributes.pop('activation', 'linear')
x = Dense(**state.Layer_attributes)(x)
x = BatchNormalization()(x)
x = Activation(actv_fn)(x)
return x
else:
return Dense(**state.Layer_attributes)(x)
elif (state.Layer_type == 'sfc'):
return SeparableFC(**state.Layer_attributes)(x)
elif (state.Layer_type == 'input'):
return Input(**state.Layer_attributes)
elif (state.Layer_type == 'embedding'):
return Embedding(**state.Layer_attributes)
elif (state.Layer_type == 'conv1d'):
if (with_bn is True):
actv_fn = state.Layer_attributes.pop('activation', 'linear')
x = Conv1D(**state.Layer_attributes)(x)
x = BatchNormalization()(x)
x = Activation(actv_fn)(x)
return x
else:
return Conv1D(**state.Layer_attributes)(x)
elif (state.Layer_type == 'denovo'):
x = Lambda((lambda t: K.expand_dims(t)))(x)
x = Permute(dims=(2, 1, 3))(x)
x = Layer_deNovo(**state.Layer_attributes)(x)
x = Lambda((lambda t: K.squeeze(t, axis=1)))(x)
return x
elif (state.Layer_type == 'sparsek_vec'):
x = Lambda(sparsek_vec, **state.Layer_attributes)(x)
return x
elif (state.Layer_type == 'maxpool1d'):
return MaxPooling1D(**state.Layer_attributes)(x)
elif (state.Layer_type == 'avgpool1d'):
return AveragePooling1D(**state.Layer_attributes)(x)
elif (state.Layer_type == 'lstm'):
return LSTM(**state.Layer_attributes)(x)
elif (state.Layer_type == 'flatten'):
return Flatten()(x)
elif (state.Layer_type == 'globalavgpool1d'):
return GlobalAveragePooling1D()(x)
elif (state.Layer_type == 'globalmaxpool1d'):
return GlobalMaxPooling1D()(x)
elif (state.Layer_type == 'dropout'):
return Dropout(**state.Layer_attributes)(x)
elif (state.Layer_type == 'identity'):
return Lambda((lambda t: t), **state.Layer_attributes)(x)
elif (state.Layer_type == 'gaussian_noise'):
return GaussianNoise(**state.Layer_attributes)(x)
elif (state.Layer_type == 'concatenate'):
return Concatenate(**state.Layer_attributes)(x)
else:
raise Exception(('Layer_type "%s" is not understood' % state.Layer_type))
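# A minimal usage sketch for get_layer, added for illustration and not part of the
# original source: chain a few Operations into a small Keras graph, the same way
# ComputationNode.build does below. The layer attributes are hypothetical.
def _get_layer_example():
    inp = get_layer(None, State('input', shape=(100, 4), name='example_input'))
    x = get_layer(inp, State('conv1d', filters=16, kernel_size=8, activation='relu'), with_bn=True)
    x = get_layer(x, State('flatten'))
    out = get_layer(x, State('dense', units=1, activation='sigmoid'))
    return Model(inputs=inp, outputs=out)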
class ComputationNode():
def __init__(self, operation, node_name, merge_op=Concatenate):
assert (type(operation) is State), ('Expect operation is of type amber.architect.State, got %s' % type(operation))
self.operation = operation
self.node_name = node_name
self.merge_op = merge_op
self.parent = []
self.child = []
self.operation_layer = None
self.merge_layer = None
self.is_built = False
def build(self):
'Build the keras layer with merge operations if applicable\n\n Notes\n -----\n when building a node, its parents must all be built already\n '
if self.parent:
if (len(self.parent) > 1):
self.merge_layer = self.merge_op()([p.operation_layer for p in self.parent])
else:
self.merge_layer = self.parent[0].operation_layer
self.operation.Layer_attributes['name'] = self.node_name
self.operation_layer = get_layer(self.merge_layer, self.operation)
self.is_built = True
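# A hedged sketch of how ComputationNode instances are wired and built, added for
# illustration and not part of the original source. The DAG classes below do this wiring
# automatically from an architecture sequence; here it is spelled out by hand.
def _computation_node_example():
    inp = ComputationNode(State('input', shape=(10,)), node_name='example_input')
    hid = ComputationNode(State('dense', units=4, activation='relu'), node_name='L0_dense')
    out = ComputationNode(State('dense', units=1, activation='sigmoid'), node_name='example_output')
    inp.child.append(hid)
    hid.parent.append(inp)
    hid.child.append(out)
    out.parent.append(hid)
    # a node's parents must all be built before the node itself
    for node in (inp, hid, out):
        node.build()
    return Model(inputs=inp.operation_layer, outputs=out.operation_layer)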
class DAG():
def __init__(self, arc_seq, num_layers, model_space, input_node, output_node, with_skip_connection=True, with_input_blocks=True):
assert all([(not x.is_built) for x in input_node]), 'input_node must not have been built'
if (type(output_node) is list):
assert all([(not x.is_built) for x in output_node]), 'output_node must not have been built'
assert (len(output_node) == 1)
output_node = output_node[0]
else:
assert (not output_node.is_built), 'output_node must not have been built'
self.arc_seq = np.array(arc_seq)
self.num_layers = num_layers
self.model_space = model_space
self.input_node = input_node
self.output_node = output_node
self.with_skip_connection = with_skip_connection
self.with_input_blocks = with_input_blocks
self.model = None
self.nodes = []
def _build_dag(self):
if self.with_input_blocks:
assert (type(self.input_node) in (list, tuple)), ('If ``with_input_blocks=True``, ``input_node`` must be array-like. Current type of input_node is %s and with_input_blocks=%s' % (type(self.input_node), self.with_input_blocks))
assert (type(self.output_node) is ComputationNode)
nodes = self._init_nodes()
nodes = self._prune_nodes(nodes)
node_list = ((self.input_node + nodes) + [self.output_node])
for node in node_list:
try:
node.build()
except Exception as e:
print(node.node_name)
print([x.node_name for x in node.parent])
print([x.node_name for x in node.child])
raise e
self.model = Model(inputs=[x.operation_layer for x in self.input_node], outputs=[self.output_node.operation_layer])
self.nodes = nodes
return self.model
def _init_nodes(self):
'first read through the architecture sequence to initialize the nodes'
arc_pointer = 0
nodes = []
for layer_id in range(self.num_layers):
arc_id = self.arc_seq[arc_pointer]
op = self.model_space[layer_id][arc_id]
parent = []
node_ = ComputationNode(op, node_name=('L%i_%s' % (layer_id, get_layer_shortname(op))))
if self.with_input_blocks:
inp_bl = np.where((self.arc_seq[(arc_pointer + 1):((arc_pointer + 1) + len(self.input_node))] == 1))[0]
for i in inp_bl:
parent.append(self.input_node[i])
self.input_node[i].child.append(node_)
elif (layer_id == 0):
for n in self.input_node:
n.child.append(node_)
parent.append(n)
if (self.with_skip_connection and (layer_id > 0)):
skip_con = np.where((self.arc_seq[((arc_pointer + 1) + (len(self.input_node) * self.with_input_blocks)):(((arc_pointer + 1) + (len(self.input_node) * self.with_input_blocks)) + layer_id)] == 1))[0]
for i in skip_con:
parent.append(nodes[i])
nodes[i].child.append(node_)
elif (layer_id > 0):
parent.append(nodes[(- 1)])
nodes[(- 1)].child.append(node_)
node_.parent = parent
nodes.append(node_)
arc_pointer += ((1 + (int(self.with_input_blocks) * len(self.input_node))) + (int(self.with_skip_connection) * layer_id))
return nodes
def _prune_nodes(self, nodes):
'Deal with loose ends: nodes with no parent, or no child\n '
default_input_node = ComputationNode(State('dense', units=1, activation='linear', name='default_input'), node_name='default_input')
default_input_node.parent = [x for x in self.input_node if (len(x.child) == 0)]
if default_input_node.parent:
for x in self.input_node:
if (len(x.child) == 0):
x.child.append(default_input_node)
has_default = True
else:
has_default = False
is_default_intermediate = False
tmp_nodes = []
for node in nodes:
node.parent = [x for x in node.parent if ((x in tmp_nodes) or (x in self.input_node))]
if (not node.parent):
if has_default:
node.parent.append(default_input_node)
default_input_node.child.append(node)
is_default_intermediate = True
else:
continue
if (not node.child):
self.output_node.parent.append(node)
node.child.append(self.output_node)
tmp_nodes.append(node)
nodes = tmp_nodes
if (has_default and (not is_default_intermediate)):
default_input_node.child.append(self.output_node)
self.output_node.parent.append(default_input_node)
if has_default:
nodes = ([default_input_node] + nodes)
return nodes
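# A hedged construction sketch for DAG, added for illustration and not part of the
# original source. `arc_seq` and `model_space` are hypothetical and must be mutually
# consistent; input and output nodes are un-built ComputationNodes, as asserted in
# __init__ above.
def _dag_example(arc_seq, model_space):
    inputs = [ComputationNode(State('input', shape=(10,)), node_name='X0')]
    output = ComputationNode(State('dense', units=1, activation='sigmoid'), node_name='y')
    dag = DAG(arc_seq=arc_seq, num_layers=len(model_space), model_space=model_space,
              input_node=inputs, output_node=output,
              with_skip_connection=True, with_input_blocks=True)
    # returns a Keras Model wired according to arc_seq
    return dag._build_dag()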
class InputBlockDAG(DAG):
def __init__(self, add_output=True, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.with_input_blocks, '`InputBlockDAG` class only handles `with_input_blocks=True`'
self.added_output_nodes = []
self.add_output = add_output
def _build_dag(self):
assert (type(self.input_node) in (list, tuple)), ('If ``with_input_blocks=True``, ``input_node`` must be array-like. Current type of input_node is %s and with_input_blocks=%s' % (type(self.input_node), self.with_input_blocks))
assert (type(self.output_node) is ComputationNode)
nodes = self._init_nodes()
nodes = self._prune_nodes(nodes)
node_list = (((self.input_node + nodes) + self.added_output_nodes) + [self.output_node])
for node in node_list:
try:
node.build()
except Exception as e:
print(node.node_name)
print([x.node_name for x in node.parent])
print([x.node_name for x in node.child])
raise e
self.nodes = nodes
self.model = DenseAddOutputChild(inputs=[x.operation_layer for x in self.input_node], outputs=([self.output_node.operation_layer] + [n.operation_layer for n in self.added_output_nodes]), nodes=self.nodes)
return self.model
def _init_nodes(self):
'first read through the architecture sequence to initialize the nodes;\n whenever an input block is connected, add an output tensor afterwards\n '
arc_pointer = 0
nodes = []
for layer_id in range(self.num_layers):
arc_id = self.arc_seq[arc_pointer]
op = self.model_space[layer_id][arc_id]
parent = []
node_ = ComputationNode(op, node_name=('L%i_%s' % (layer_id, get_layer_shortname(op))))
inp_bl = np.where((self.arc_seq[(arc_pointer + 1):((arc_pointer + 1) + len(self.input_node))] == 1))[0]
if any(inp_bl):
for i in inp_bl:
parent.append(self.input_node[i])
self.input_node[i].child.append(node_)
if (self.add_output and (layer_id != (self.num_layers - 1))):
if (type(self.output_node) is list):
assert (len(self.output_node) == 1)
self.output_node = self.output_node[0]
new_out = ComputationNode(operation=self.output_node.operation, node_name=('added_out_%i' % (len(self.added_output_nodes) + 1)))
new_out.parent.append(node_)
self.added_output_nodes.append(new_out)
if (self.with_skip_connection and (layer_id > 0)):
skip_con = np.where((self.arc_seq[((arc_pointer + 1) + (len(self.input_node) * self.with_input_blocks)):(((arc_pointer + 1) + (len(self.input_node) * self.with_input_blocks)) + layer_id)] == 1))[0]
for i in skip_con:
parent.append(nodes[i])
nodes[i].child.append(node_)
elif (layer_id > 0):
parent.append(nodes[(- 1)])
nodes[(- 1)].child.append(node_)
node_.parent = parent
nodes.append(node_)
arc_pointer += ((1 + (int(self.with_input_blocks) * len(self.input_node))) + (int(self.with_skip_connection) * layer_id))
return nodes
class InputBlockAuxLossDAG(InputBlockDAG):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.add_output, 'InputBlockAuxLossDAG must have `add_output=True`'
self.input_block_loss_mapping = {}
def _build_dag(self):
assert (type(self.input_node) in (list, tuple)), ('If ``with_input_blocks=True``, ``input_node`` must be array-like. Current type of input_node is %s and with_input_blocks=%s' % (type(self.input_node), self.with_input_blocks))
assert (type(self.output_node) is ComputationNode)
nodes = self._init_nodes()
nodes = self._prune_nodes(nodes)
node_list = (((self.input_node + nodes) + self.added_output_nodes) + [self.output_node])
for node in node_list:
try:
node.build()
except Exception as e:
print(node.node_name)
print([x.node_name for x in node.parent])
print([x.node_name for x in node.child])
raise e
self.nodes = nodes
self.model = DenseAddOutputChild(inputs=[x.operation_layer for x in self.input_node], outputs=([self.output_node.operation_layer] + [n.operation_layer for n in self.added_output_nodes]), nodes=self.nodes, block_loss_mapping=self.input_block_loss_mapping)
return self.model
def _aux_loss(self, nodes):
input_blocks = [x.node_name for x in self.input_node]
inp_heads = {inp: None for inp in input_blocks}
inp_pair_roots = {(b1, b2): 'None' for b1 in input_blocks for b2 in input_blocks}
leaves_cardinality = {n.node_name: set([]) for n in nodes}
for n in nodes:
_inputs = [x.node_name for x in n.parent if (x.operation.Layer_type == 'input')]
inp_heads.update({x: n.node_name for x in _inputs})
_ops = set([x.node_name for x in n.parent if (x.operation.Layer_type != 'input')])
for leaf in (_inputs + [l for x in _ops for l in leaves_cardinality[x]]):
leaves_cardinality[n.node_name].add(leaf)
inp_heads.update({x: n.node_name for x in input_blocks if (inp_heads[x] in _ops)})
for b1 in input_blocks:
for b2 in input_blocks:
if (inp_pair_roots[(b1, b2)] != 'None'):
continue
head1 = inp_heads[b1]
head2 = inp_heads[b2]
if (head1 == head2 == n.node_name):
inp_pair_roots[(b1, b2)] = n.node_name
aux_loss_nodes = []
layer2loss = {}
node_index = {node.node_name: node for node in nodes}
for t in sorted(set(inp_pair_roots.values())):
if ((t == 'None') or (node_index[t] == nodes[(- 1)])):
layer2loss[t] = None
continue
else:
new_out = ComputationNode(operation=self.output_node.operation, node_name=('add_out_%i' % (len(aux_loss_nodes) + 1)))
new_out.parent.append(node_index[t])
aux_loss_nodes.append(new_out)
layer2loss[t] = (len(aux_loss_nodes) + 1)
self.added_output_nodes = aux_loss_nodes
for (b1, b2) in inp_pair_roots:
self.input_block_loss_mapping[(b1, b2)] = layer2loss[inp_pair_roots[(b1, b2)]]
return
def _init_nodes(self):
arc_pointer = 0
nodes = []
for layer_id in range(self.num_layers):
arc_id = self.arc_seq[arc_pointer]
op = self.model_space[layer_id][arc_id]
parent = []
node_ = ComputationNode(op, node_name=('L%i_%s' % (layer_id, get_layer_shortname(op))))
inp_bl = np.where((self.arc_seq[(arc_pointer + 1):((arc_pointer + 1) + len(self.input_node))] == 1))[0]
if any(inp_bl):
for i in inp_bl:
parent.append(self.input_node[i])
self.input_node[i].child.append(node_)
if (self.with_skip_connection and (layer_id > 0)):
skip_con = np.where((self.arc_seq[((arc_pointer + 1) + (len(self.input_node) * self.with_input_blocks)):(((arc_pointer + 1) + (len(self.input_node) * self.with_input_blocks)) + layer_id)] == 1))[0]
for i in skip_con:
parent.append(nodes[i])
nodes[i].child.append(node_)
elif (layer_id > 0):
parent.append(nodes[(- 1)])
nodes[(- 1)].child.append(node_)
node_.parent = parent
nodes.append(node_)
arc_pointer += ((1 + (int(self.with_input_blocks) * len(self.input_node))) + (int(self.with_skip_connection) * layer_id))
self._aux_loss(nodes)
return nodes
class EnasAnnDAG():
def __init__(self, model_space, input_node, output_node, model_compile_dict, session, l1_reg=0.0, l2_reg=0.0, with_skip_connection=True, with_input_blocks=True, with_output_blocks=False, controller=None, feature_model=None, feature_model_trainable=None, child_train_op_kwargs=None, name='EnasDAG'):
'\n EnasAnnDAG is a DAG model builder for using the weight sharing framework. This class deals with the vanilla\n Artificial neural network. The weight sharing is between all Ws for different hidden units sizes - that is,\n a larger hidden size always includes the smaller ones.\n Args:\n model_space:\n input_node:\n output_node:\n model_compile_dict: compile dict for child models\n session: tf.Session\n with_skip_connection:\n with_input_blocks:\n name:\n '
self.model_space = model_space
if (not (type(input_node) in (tuple, list))):
self.input_node = [input_node]
else:
self.input_node = input_node
if (not (type(output_node) in (tuple, list))):
self.output_node = [output_node]
else:
self.output_node = output_node
if (session is None):
self.session = tf.compat.v1.Session()
else:
self.session = session
self.model_compile_dict = model_compile_dict
self.l1_reg = l1_reg
self.l2_reg = l2_reg
self.with_skip_connection = with_skip_connection
self.with_input_blocks = with_input_blocks
self.with_output_blocks = with_output_blocks
self.num_layers = len(model_space)
self.name = name
self.input_arc = None
self.sample_w_masks = []
self.feature_model = feature_model
if (self.feature_model is None):
self.feature_model_trainable = False
else:
self.feature_model_trainable = (feature_model_trainable or False)
self.child_train_op_kwargs = child_train_op_kwargs
self._verify_args()
self._create_params()
if (controller is None):
self.controller = None
else:
self.controller = controller
self._build_sample_arc()
self._build_fixed_arc()
vars = [v for v in tf.all_variables() if v.name.startswith(self.name)]
self.session.run(tf.initialize_variables(vars))
self.train_fixed_arc = False
def __call__(self, arc_seq, node_builder=None, *args, **kwargs):
model = self._model(arc_seq)
if ((node_builder is not None) and (arc_seq is not None)):
nb = node_builder(arc_seq)
nodes = nb._init_nodes()
nodes = nb._prune_nodes(nodes)
nodes = ((nb.input_node + nodes) + [nb.output_node])
model.nodes = nodes
return model
def set_controller(self, controller):
assert (self.controller is None), 'this instance already has a controller; start a new EnasAnnDAG instance if you want to connect another controller'
self.controller = controller
self._build_sample_arc()
vars = [v for v in tf.all_variables() if v.name.startswith(('%s/sample' % self.name))]
self.session.run(tf.initialize_variables(vars))
def _verify_args(self):
'verify vanilla ANN model space, input nodes, etc.,\n and configure internal attr. like masking steps'
if ((not self.with_output_blocks) and (len(self.output_node) > 1)):
warnings.warn(('You specified `with_output_blocks=False`, but gave a List of output operations of length %i' % len(self.output_node)), stacklevel=2)
assert (len(set([tuple(self.model_space[i]) for i in range(self.num_layers)])) == 1), 'model_space for EnasDAG must be identical for all layers'
layer_ = self.model_space[0]
all_actv_fns = set([x.Layer_attributes['activation'] for x in layer_])
assert (len(all_actv_fns) == 1), ('all operations must share the same activation function, got %s' % all_actv_fns)
self._actv_fn = all_actv_fns.pop()
self._weight_units = np.array([x.Layer_attributes['units'] for x in layer_], dtype=np.int32)
self._weight_max_units = np.max(self._weight_units)
self._input_block_map = np.zeros((len(self.input_node), 2), dtype=np.int32)
self.num_input_blocks = len(self.input_node)
start_idx = 0
for i in range(len(self.input_node)):
n_feature = self.input_node[i].Layer_attributes['shape'][0]
self._input_block_map[i] = [start_idx, (start_idx + n_feature)]
start_idx += n_feature
self._feature_max_size = start_idx
self._child_output_size = [n.Layer_attributes['units'] for n in self.output_node]
self._child_output_func = [n.Layer_attributes['activation'] for n in self.output_node]
self.num_output_blocks = len(self.output_node)
self._output_block_map = np.array([[(i * self._weight_max_units), ((i + 1) * self._weight_max_units)] for i in range(self.num_layers)], dtype=np.int32).reshape((self.num_layers, 2))
self._skip_conn_map = {}
start_map = np.array([[0, self._weight_max_units]], dtype=np.int32).reshape((1, 2))
for i in range(1, self.num_layers):
self._skip_conn_map[i] = start_map
start_map = np.concatenate([start_map, np.array([[(i * self._weight_max_units), ((i + 1) * self._weight_max_units)]], dtype=np.int32).reshape(1, 2)])
def _create_params(self):
self.w = []
self.b = []
input_max_size = self._input_block_map[(- 1)][(- 1)]
with tf.compat.v1.variable_scope(self.name):
self.train_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='train_step')
for layer_id in range(self.num_layers):
with tf.variable_scope('layer_{}'.format(layer_id)):
self.w.append(tf.compat.v1.get_variable('Weight/w', shape=((input_max_size + (layer_id * self._weight_max_units)), self._weight_max_units), dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer()))
self.b.append(tf.compat.v1.get_variable('Bias/b', shape=(self._weight_max_units,), initializer=tf.initializers.zeros(), dtype=tf.float32))
with tf.compat.v1.variable_scope('stem_io'):
self.child_model_input = tf.compat.v1.placeholder(shape=(None, self._feature_max_size), dtype=tf.float32, name='child_input')
self.child_model_label = [tf.compat.v1.placeholder(shape=(None, self._child_output_size[i]), dtype=tf.float32, name=('child_output_%i' % i)) for i in range(len(self.output_node))]
if (self.feature_model is not None):
self.child_model_label_pipe = self.feature_model.y_it
self.child_model_input_pipe = self.feature_model.x_it
if self.with_output_blocks:
self.w_out = [tf.compat.v1.get_variable(('w_out_%i' % i), shape=((self._weight_max_units * self.num_layers), self._child_output_size[i]), dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer()) for i in range(len(self.output_node))]
self.b_out = [tf.compat.v1.get_variable(('b_out_%i' % i), shape=self._child_output_size[i], dtype=tf.float32, initializer=tf.initializers.zeros()) for i in range(len(self.output_node))]
else:
self.w_out = [tf.compat.v1.get_variable(('w_out_%i' % i), shape=(self._weight_max_units, self._child_output_size[i]), dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer()) for i in range(len(self.output_node))]
self.b_out = [tf.compat.v1.get_variable(('b_out_%i' % i), shape=self._child_output_size[i], dtype=tf.float32, initializer=tf.initializers.zeros()) for i in range(len(self.output_node))]
def _build_sample_arc(self):
'\n sample_output and sample_w_masks are the child model tensors that got built after getting a random sample\n from controller.sample_arc\n '
with tf.compat.v1.variable_scope(('%s/sample' % self.name)):
self.connect_controller(self.controller)
(sample_output, sample_w_masks, sample_layer_outputs, sample_dropouts) = self._build_dag(self.sample_arc)
self.sample_model_output = sample_output
self.sample_w_masks = sample_w_masks
self.sample_layer_outputs = sample_layer_outputs
ops = self._compile(w_masks=self.sample_w_masks, model_output=self.sample_model_output, use_pipe=True)
self.sample_train_op = ops['train_op']
self.sample_loss = ops['loss']
self.sample_optimizer = ops['optimizer']
self.sample_metrics = ops['metrics']
self.sample_ops = ops
self.sample_dropouts = sample_dropouts
def _build_fixed_arc(self):
'\n fixed_output and fixed_w_masks are the child model tensors built according to a fixed arc from user inputs\n '
with tf.compat.v1.variable_scope(('%s/fixed' % self.name)):
self._create_input_ph()
(fixed_output, fixed_w_masks, fixed_layer_outputs, fixed_dropouts) = self._build_dag(self.input_arc)
self.fixed_model_output = fixed_output
self.fixed_w_masks = fixed_w_masks
self.fixed_layer_outputs = fixed_layer_outputs
ops = self._compile(w_masks=self.fixed_w_masks, model_output=self.fixed_model_output, use_pipe=False)
self.fixed_train_op = ops['train_op']
self.fixed_loss = ops['loss']
self.fixed_optimizer = ops['optimizer']
self.fixed_metrics = ops['metrics']
self.fixed_ops = ops
self.fixed_dropouts = fixed_dropouts
def _build_dag(self, arc_seq):
'\n Shared DAG building process for both sampled arc and fixed arc\n Args:\n arc_seq:\n\n Returns:\n\n '
w_masks = []
layer_outputs = []
start_idx = 0
dropout_placeholders = []
if (self.feature_model is None):
inputs = self.child_model_input
elif ((self.feature_model.pseudo_inputs_pipe is None) or (type(arc_seq) is list)):
inputs = self.feature_model.pseudo_inputs
else:
inputs = self.feature_model.pseudo_inputs_pipe
input_dropprob = tf.placeholder_with_default(0.0, shape=(), name='dropout_input')
inputs = tf.nn.dropout(inputs, rate=input_dropprob)
dropout_placeholders.append(input_dropprob)
for layer_id in range(self.num_layers):
w = self.w[layer_id]
b = self.b[layer_id]
num_units = tf.nn.embedding_lookup(self._weight_units, arc_seq[start_idx])
col_mask = tf.cast(tf.less(tf.range(0, limit=self._weight_max_units, delta=1), num_units), tf.int32)
start_idx += 1
if self.with_input_blocks:
inp_mask = arc_seq[start_idx:(start_idx + self.num_input_blocks)]
inp_mask = tf.boolean_mask(self._input_block_map, tf.squeeze(inp_mask))
new_range = tf.range(0, limit=self._feature_max_size, dtype=tf.int32)
inp_mask = tf.map_fn((lambda x: tf.cast(tf.logical_and((x[0] <= new_range), (new_range < x[1])), dtype=tf.int32)), inp_mask)
inp_mask = tf.reduce_sum(inp_mask, axis=0)
start_idx += (self.num_input_blocks * self.with_input_blocks)
else:
inp_mask = (tf.ones(shape=self._feature_max_size, dtype=tf.int32) if (layer_id == 0) else tf.zeros(shape=self._feature_max_size, dtype=tf.int32))
if self.with_skip_connection:
if (layer_id > 0):
layer_mask = arc_seq[start_idx:(start_idx + layer_id)]
layer_mask = tf.boolean_mask(self._skip_conn_map[layer_id], layer_mask)
new_range2 = tf.range(0, limit=(layer_id * self._weight_max_units), delta=1, dtype=tf.int32)
layer_mask = tf.map_fn((lambda t: tf.cast(tf.logical_and((t[0] <= new_range2), (new_range2 < t[1])), dtype=tf.int32)), layer_mask)
layer_mask = tf.reduce_sum(layer_mask, axis=0)
start_idx += (layer_id * self.with_skip_connection)
else:
layer_mask = []
row_mask = tf.concat([inp_mask, layer_mask], axis=0)
else:
if (layer_id > 0):
layer_masks = [tf.zeros(shape=(self._weight_max_units * (layer_id - 1)), dtype=tf.int32), tf.ones(shape=self._weight_max_units, dtype=tf.int32)]
else:
layer_masks = []
row_mask = tf.concat(([inp_mask] + layer_masks), axis=0)
w_mask = tf.matmul(tf.expand_dims(row_mask, (- 1)), tf.expand_dims(col_mask, 0))
w = tf.where(tf.cast(w_mask, tf.bool), x=w, y=tf.fill(tf.shape(w), 0.0))
b = tf.where(tf.cast(col_mask, tf.bool), x=b, y=tf.fill(tf.shape(b), 0.0))
(x, drop_rate) = self._layer(w, b, inputs, layer_id)
dropout_placeholders.append(drop_rate)
layer_outputs.append(x)
inputs = tf.concat([inputs, x], axis=1)
w_masks.append((w, b))
if self.with_output_blocks:
model_output = []
output_arcs = arc_seq[start_idx:]
if (type(output_arcs) is list):
output_arcs_len = len(output_arcs)
else:
output_arcs_len = output_arcs.shape[0].value
assert (output_arcs_len == (self.num_output_blocks * self.num_layers)), ("model builder was specified to build output connections, but the input architecture didn't match output info; expected arc of length=%i, received %i" % ((start_idx + (self.num_output_blocks * self.num_layers)), (len(arc_seq) if (type(arc_seq) is list) else arc_seq.shape[0].value)))
layer_outputs_ = tf.concat(layer_outputs, axis=1)
for i in range(self.num_output_blocks):
output_mask = tf.boolean_mask(self._output_block_map, output_arcs[(i * self.num_layers):((i + 1) * self.num_layers)])
new_range = tf.range(0, limit=(self.num_layers * self._weight_max_units), delta=1, dtype=tf.int32)
output_mask = tf.map_fn((lambda t: tf.cast(tf.logical_and((t[0] <= new_range), (new_range < t[1])), dtype=tf.int32)), output_mask)
output_mask = tf.reduce_sum(output_mask, axis=0)
output_mask = tf.matmul(tf.expand_dims(output_mask, (- 1)), tf.ones((1, self._child_output_size[i]), dtype=tf.int32))
w = tf.where(tf.cast(output_mask, tf.bool), x=self.w_out[i], y=tf.fill(tf.shape(self.w_out[i]), 0.0))
model_output.append(get_tf_layer(self._child_output_func[i])((tf.matmul(layer_outputs_, w) + self.b_out[i])))
w_masks.append((w, self.b_out[i]))
else:
model_output = [get_tf_layer(self._child_output_func[i])((tf.matmul(x, self.w_out[i]) + self.b_out[i])) for i in range(len(self.output_node))]
return (model_output, w_masks, layer_outputs, dropout_placeholders)
def _layer(self, w, b, inputs, layer_id, use_dropout=True):
layer = get_tf_layer(self._actv_fn)((tf.matmul(inputs, w) + b))
if use_dropout:
drop_prob = tf.placeholder_with_default(0.0, shape=(), name=('dropout_%i' % layer_id))
layer = tf.nn.dropout(layer, rate=drop_prob)
return (layer, drop_prob)
else:
return (layer, None)
def _model(self, arc):
if (self.feature_model is None):
child_model_input = self.child_model_input
elif ((self.feature_model.pseudo_inputs_pipe is None) or (arc is not None)):
child_model_input = self.feature_model.x_inputs
else:
child_model_input = self.child_model_input_pipe
if (arc is None):
model = EnasAnnModel(inputs=child_model_input, outputs=self.sample_model_output, arc_seq=arc, dag=self, session=self.session)
else:
model = EnasAnnModel(inputs=child_model_input, outputs=self.fixed_model_output, arc_seq=arc, dag=self, session=self.session)
return model
def _create_input_ph(self):
ops_each_layer = 1
total_arc_len = sum(([(ops_each_layer + (self._input_block_map.shape[0] * self.with_input_blocks))] + [((ops_each_layer + (self._input_block_map.shape[0] * self.with_input_blocks)) + (i * self.with_skip_connection)) for i in range(1, self.num_layers)]))
if self.with_output_blocks:
total_arc_len += (self.num_output_blocks * self.num_layers)
self.input_ph_ = [tf.compat.v1.placeholder(shape=(), dtype=tf.int32, name='arc_{}'.format(i)) for i in range(total_arc_len)]
self.input_arc = self.input_ph_
return
def _compile(self, w_masks, model_output, use_pipe=True, var_scope=None):
'\n Compile loss and train_op here so all child models will share the same, instead of creating new ones every time\n '
loss = self.model_compile_dict['loss']
optimizer = self.model_compile_dict['optimizer']
metrics = (self.model_compile_dict['metrics'] if ('metrics' in self.model_compile_dict) else None)
var_scope = (var_scope or self.name)
with tf.compat.v1.variable_scope('compile', reuse=tf.AUTO_REUSE):
if ((self.feature_model is None) or (self.feature_model.pseudo_inputs_pipe is None)):
labels = self.child_model_label
elif use_pipe:
labels = self.child_model_label_pipe
else:
labels = self.child_model_label
loss_weights = (self.model_compile_dict['loss_weights'] if ('loss_weights' in self.model_compile_dict) else None)
if (type(loss) is str):
loss_ = [get_tf_loss(loss, labels[i], model_output[i]) for i in range(len(model_output))]
loss_ = tf.reduce_mean(loss_)
elif (type(loss) is list):
loss_ = []
for i in range(len(loss)):
loss_.append(get_tf_loss(loss[i], labels[i], model_output[i]))
loss_ = tf.reduce_sum(loss_)
elif callable(loss):
loss_ = tf.reduce_sum([loss(labels[i], model_output[i]) for i in range(len(model_output))])
else:
raise Exception(('expect loss to be str, list, dict or callable; got %s' % loss))
trainable_var = [var for var in tf.trainable_variables() if var.name.startswith(var_scope)]
if self.feature_model_trainable:
feature_model_trainable_var = [var for var in tf.trainable_variables() if var.name.startswith(self.feature_model.name)]
assert (len(feature_model_trainable_var) > 0), 'You asked me to train featureModel but there is no trainable variables in featureModel'
trainable_var += feature_model_trainable_var
regularization_penalty = 0.0
if (self.l1_reg > 0):
l1_regularizer = tf.contrib.layers.l1_regularizer(scale=self.l1_reg, scope=self.name)
l1_regularization_penalty = tf.contrib.layers.apply_regularization(l1_regularizer, [var[0] for var in w_masks])
loss_ += l1_regularization_penalty
else:
l1_regularization_penalty = 0.0
if (self.l2_reg > 0):
l2_regularizer = tf.contrib.layers.l2_regularizer(scale=self.l2_reg, scope=self.name)
l2_regularization_penalty = tf.contrib.layers.apply_regularization(l2_regularizer, [var[0] for var in w_masks])
loss_ += l2_regularization_penalty
else:
l2_regularization_penalty = 0.0
regularization_penalty += (l1_regularization_penalty + l2_regularization_penalty)
if (self.child_train_op_kwargs is None):
(train_op, lr, grad_norm, optimizer_) = get_keras_train_ops(loss=loss_, tf_variables=trainable_var, optim_algo=optimizer, train_step=self.train_step)
else:
(train_op, lr, grad_norm, optimizer_) = get_keras_train_ops(loss=loss_, tf_variables=trainable_var, train_step=self.train_step, optim_algo=optimizer, **self.child_train_op_kwargs)
if (metrics is None):
metrics = []
else:
metrics = [get_tf_metrics(x) for x in metrics]
metrics_ = [f(tf.squeeze(self.child_model_label[i]), tf.squeeze(model_output[i])) for i in range(len(model_output)) for f in metrics]
ops = {'train_op': train_op, 'lr': lr, 'grad_norm': grad_norm, 'optimizer': optimizer, 'loss': loss_, 'metrics': metrics_, 'reg_cost': regularization_penalty}
return ops
def connect_controller(self, controller):
self.sample_arc = controller.sample_arc
self.controller = controller
return
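# A hedged construction sketch for EnasAnnDAG, added for illustration and not part of
# the original source. `model_space`, `controller` and `sess` are hypothetical; as
# enforced by _verify_args above, every layer of the model space must be identical and
# all operations must share one activation, differing only in `units`.
def _enas_ann_dag_example(model_space, controller, sess):
    inputs = [State('input', shape=(100,), name='X0'), State('input', shape=(50,), name='X1')]
    outputs = [State('dense', units=1, activation='sigmoid', name='y')]
    dag = EnasAnnDAG(model_space=model_space, input_node=inputs, output_node=outputs,
                     model_compile_dict={'loss': 'binary_crossentropy', 'optimizer': 'adam'},
                     session=sess, controller=controller,
                     with_skip_connection=True, with_input_blocks=True)
    # dag(arc_seq=None) returns the controller-sampled child model;
    # dag(arc_seq=fixed_arc) returns a child model built from placeholders for a fixed arc
    return dag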
class EnasConv1dDAG():
def __init__(self, model_space, input_node, output_node, model_compile_dict, session, with_skip_connection=True, batch_size=128, keep_prob=0.9, l1_reg=0.0, l2_reg=0.0, reduction_factor=4, controller=None, child_train_op_kwargs=None, stem_config=None, data_format='NWC', train_fixed_arc=False, fixed_arc=None, name='EnasDAG', **kwargs):
'EnasCnnDAG is a DAG model builder for using the weight sharing framework.\n\n This class deals with convolutional neural networks.\n\n Parameters\n ----------\n model_space: amber.architect.ModelSpace\n input_node: amber.architect.Operation, or list\n output_node: amber.architect.Operation, or list\n model_compile_dict: dict\n compile dict for child models\n session: tf.Session\n session for building the enas DAG\n train_fixed_arc: bool\n boolean indicator for whether this is the final stage; if True, must provide `fixed_arc` and must not connect\n to a controller\n fixed_arc: list-like\n the architecture for final-stage training\n name: str\n '
assert ((type(input_node) in (State, tf.Tensor)) or (len(input_node) == 1)), 'EnasCnnDAG currently does not accept List type of inputs'
assert ((type(output_node) in (State, tf.Tensor)) or (len(output_node) == 1)), 'EnasCnnDAG currently does not accept List type of outputs'
self.input_node = input_node
self.output_node = output_node
self.num_layers = len(model_space)
self.model_space = model_space
self.model_compile_dict = model_compile_dict
self.session = session
self.l1_reg = l1_reg
self.l2_reg = l2_reg
self.with_skip_connection = with_skip_connection
self.controller = None
if (controller is not None):
self.set_controller(controller)
self.child_train_op_kwargs = child_train_op_kwargs
self.stem_config = (stem_config or {})
self.name = name
self.batch_size = batch_size
self.batch_dim = None
self.reduction_factor = reduction_factor
self.keep_prob = keep_prob
self.data_format = data_format
self.out_filters = None
self.branches = []
self.is_initialized = False
self.add_conv1_under_pool = kwargs.pop('add_conv1_under_pool', True)
self.train_fixed_arc = train_fixed_arc
self.fixed_arc = fixed_arc
if self.train_fixed_arc:
assert (self.fixed_arc is not None), 'if train_fixed_arc=True, must provide the architectures in `fixed_arc`'
assert (controller is None), 'if train_fixed_arc=True, must not provide controller'
self.skip_max_depth = None
self._verify_args()
self.vars = []
if (controller is None):
self.controller = None
print('this EnasDAG instance did not connect a controller; please make sure you are only training a fixed architecture.')
else:
self.controller = controller
self._build_sample_arc()
self._build_fixed_arc()
self.session.run(tf.initialize_variables(self.vars))
def _verify_args(self):
out_filters = []
pool_layers = []
for layer_id in range(len(self.model_space)):
layer = self.model_space[layer_id]
this_out_filters = [l.Layer_attributes['filters'] for l in layer]
assert (len(set(this_out_filters)) == 1), ('EnasConv1dDAG only supports one identical number of filters per layer, but found %i different numbers of filters in layer %s' % (len(set(this_out_filters)), layer))
if (len(out_filters) and (this_out_filters[0] != out_filters[(- 1)])):
pool_layers.append((layer_id - 1))
out_filters.append(this_out_filters[0])
self.out_filters = out_filters
self.pool_layers = pool_layers
if self.train_fixed_arc:
assert (self.fixed_arc is not None)
skip_max_depth = {}
start_idx = 0
for layer_id in range(len(self.model_space)):
skip_max_depth[layer_id] = layer_id
operation = self.fixed_arc[start_idx]
total_choices = len(self.model_space[layer_id])
assert (0 <= operation < total_choices), ('Invalid operation selection: layer_id=%i, operation=%i, model space len=%i' % (layer_id, operation, total_choices))
if (layer_id > 0):
skip_binary = self.fixed_arc[(start_idx + 1):((start_idx + 1) + layer_id)]
skip = [i for i in range(layer_id) if (skip_binary[i] == 1)]
for d in skip:
skip_max_depth[d] = layer_id
start_idx += (1 + layer_id)
print(('-' * 80))
print(skip_max_depth)
self.skip_max_depth = skip_max_depth
if (type(self.input_node) is list):
self.input_node = self.input_node[0]
self.input_ph = tf.placeholder(shape=([self.batch_dim] + list(self.input_node.Layer_attributes['shape'])), name='child_input_placeholder', dtype=tf.float32)
if (type(self.output_node) is list):
self.output_node = self.output_node[0]
self.label_ph = tf.placeholder(shape=(self.batch_dim, self.output_node.Layer_attributes['units']), dtype=tf.float32, name='child_output_placeholder')
def __call__(self, arc_seq=None, **kwargs):
return self._model(arc_seq, **kwargs)
def _model(self, arc, **kwargs):
if self.train_fixed_arc:
assert ((arc == self.fixed_arc) or (arc is None)), 'This DAG instance is built to train a fixed arc, hence you can only provide arc=None or arc=self.fixed_arc; check the initialization of this instance'
if (arc is None):
if self.train_fixed_arc:
model = EnasCnnModel(inputs=self.fixed_model_input, outputs=self.fixed_model_output, labels=self.fixed_model_label, arc_seq=arc, dag=self, session=self.session, name=self.name)
else:
model = EnasCnnModel(inputs=self.sample_model_input, outputs=self.sample_model_output, labels=self.sample_model_label, arc_seq=arc, dag=self, session=self.session, name=self.name)
else:
model = EnasCnnModel(inputs=self.fixed_model_input, outputs=self.fixed_model_output, labels=self.fixed_model_label, arc_seq=arc, dag=self, session=self.session, name=self.name)
return model
def set_controller(self, controller):
assert (self.controller is None), 'this instance already has a controller; start a new EnasCnnDAG instance if you want to connect another controller'
self.controller = controller
self.sample_arc = controller.sample_arc
def _build_sample_arc(self, input_tensor=None, label_tensor=None, **kwargs):
'\n Notes:\n I left `input_tensor` and `label_tensor` so that in the future some pipeline\n tensors can be connected to the model, instead of the placeholders as is now.\n\n Args:\n input_tensor:\n label_tensor:\n **kwargs:\n\n Returns:\n\n '
var_scope = self.name
is_training = kwargs.pop('is_training', True)
reuse = kwargs.pop('reuse', tf.AUTO_REUSE)
with tf.compat.v1.variable_scope(var_scope, reuse=reuse):
input_tensor = (self.input_ph if (input_tensor is None) else input_tensor)
label_tensor = (self.label_ph if (label_tensor is None) else label_tensor)
(model_out, dropout_placeholders) = self._build_dag(arc_seq=self.sample_arc, input_tensor=input_tensor, is_training=is_training, reuse=reuse)
self.sample_model_output = model_out
self.sample_model_input = input_tensor
self.sample_model_label = label_tensor
self.sample_dropouts = dropout_placeholders
ops = self._compile(model_output=[self.sample_model_output], labels=[label_tensor], is_training=is_training, var_scope=var_scope)
self.sample_train_op = ops['train_op']
self.sample_loss = ops['loss']
self.sample_optimizer = ops['optimizer']
self.sample_metrics = ops['metrics']
self.sample_ops = ops
vars_ = [v for v in tf.all_variables() if (v.name.startswith(var_scope) and (v not in self.vars))]
if len(vars_):
self.session.run(tf.initialize_variables(vars_))
self.vars += vars_
def _build_fixed_arc(self, input_tensor=None, label_tensor=None, **kwargs):
'\n Notes:\n I left `input_tensor` and `label_tensor` so that in the future some pipeline\n tensors can be connected to the model, instead of the placeholders as is now.\n\n Args:\n input_tensor:\n label_tensor:\n **kwargs:\n\n Returns:\n\n '
var_scope = self.name
if self.train_fixed_arc:
is_training = True
else:
is_training = kwargs.pop('is_training', False)
reuse = kwargs.pop('reuse', tf.AUTO_REUSE)
with tf.compat.v1.variable_scope(var_scope, reuse=reuse):
input_tensor = (self.input_ph if (input_tensor is None) else input_tensor)
label_tensor = (self.label_ph if (label_tensor is None) else label_tensor)
self._create_input_ph()
if self.train_fixed_arc:
(model_out, dropout_placeholders) = self._build_dag(arc_seq=self.fixed_arc, input_tensor=input_tensor, is_training=is_training, reuse=reuse)
else:
(model_out, dropout_placeholders) = self._build_dag(arc_seq=self.input_arc, input_tensor=input_tensor, is_training=is_training, reuse=reuse)
self.fixed_model_output = model_out
self.fixed_model_input = input_tensor
self.fixed_model_label = label_tensor
self.fixed_dropouts = dropout_placeholders
ops = self._compile(model_output=[self.fixed_model_output], labels=[label_tensor], is_training=is_training, var_scope=var_scope)
self.fixed_train_op = ops['train_op']
self.fixed_loss = ops['loss']
self.fixed_optimizer = ops['optimizer']
self.fixed_metrics = ops['metrics']
self.fixed_ops = ops
vars = [v for v in tf.all_variables() if (v.name.startswith(var_scope) and (v not in self.vars))]
if len(vars):
self.session.run(tf.initialize_variables(vars))
self.vars += vars
def _create_input_ph(self):
ops_each_layer = 1
total_arc_len = sum([(ops_each_layer + i) for i in range(self.num_layers)])
input_ph_ = [tf.compat.v1.placeholder(shape=(), dtype=tf.int32, name='arc_{}'.format(i)) for i in range(total_arc_len)]
self.input_arc = input_ph_
return
def _compile(self, model_output, labels=None, is_training=True, var_scope=None):
loss = self.model_compile_dict['loss']
optimizer = self.model_compile_dict['optimizer']
metrics = self.model_compile_dict.pop('metrics', None)
var_scope = (var_scope or self.name)
labels = (self.label_ph if (labels is None) else labels)
with tf.variable_scope('compile'):
if (type(loss) is str):
loss_ = [get_tf_loss(loss, labels[i], model_output[i]) for i in range(len(model_output))]
loss_ = tf.reduce_mean(loss_)
elif (type(loss) is list):
loss_ = []
for i in range(len(loss)):
loss_.append(get_tf_loss(loss[i], labels[i], model_output[i]))
loss_ = tf.reduce_sum(loss_)
elif callable(loss):
loss_ = tf.reduce_sum([loss(labels[i], model_output[i]) for i in range(len(model_output))])
else:
raise Exception(('expect loss to be str, list, dict or callable; got %s' % loss))
trainable_var = [var for var in tf.trainable_variables() if var.name.startswith(var_scope)]
regularization_penalty = 0.0
if (self.l1_reg > 0):
l1_regularizer = tf.contrib.layers.l1_regularizer(scale=self.l1_reg, scope=self.name)
l1_regularization_penalty = tf.contrib.layers.apply_regularization(l1_regularizer, [var for var in trainable_var if (var.name.split('/')[(- 1)] == 'w:0')])
loss_ += l1_regularization_penalty
else:
l1_regularization_penalty = 0.0
if (self.l2_reg > 0):
l2_regularizer = tf.contrib.layers.l2_regularizer(scale=self.l2_reg, scope=self.name)
l2_regularization_penalty = tf.contrib.layers.apply_regularization(l2_regularizer, [var for var in trainable_var if (var.name.split('/')[(- 1)] == 'w:0')])
loss_ += l2_regularization_penalty
else:
l2_regularization_penalty = 0.0
regularization_penalty += (l1_regularization_penalty + l2_regularization_penalty)
if is_training:
if (self.child_train_op_kwargs is None):
(train_op, lr, grad_norm, optimizer_) = get_keras_train_ops(loss=loss_, tf_variables=trainable_var, optim_algo=optimizer, train_step=self.train_step)
else:
(train_op, lr, grad_norm, optimizer_) = get_keras_train_ops(loss=loss_, tf_variables=trainable_var, train_step=self.train_step, optim_algo=optimizer, **self.child_train_op_kwargs)
else:
(train_op, lr, grad_norm, optimizer_) = (None, None, None, None)
if (metrics is None):
metrics = []
else:
metrics = [get_tf_metrics(x) for x in metrics]
metrics_ = [f(labels[i], model_output[i]) for i in range(len(model_output)) for f in metrics]
ops = {'train_op': train_op, 'lr': lr, 'grad_norm': grad_norm, 'optimizer': optimizer_, 'loss': loss_, 'metrics': metrics_, 'reg_cost': regularization_penalty}
return ops
def _build_dag(self, arc_seq, input_tensor=None, is_training=True, reuse=True):
self.layers = []
self.train_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='train_step')
dropout_placeholders = []
with tf.variable_scope('child_model', reuse=reuse):
out_filters = self.out_filters
input = (self.input_ph if (input_tensor is None) else input_tensor)
layers = []
has_stem_conv = True
if has_stem_conv:
with tf.variable_scope('stem_conv'):
stem_kernel_size = self.stem_config.pop('stem_kernel_size', 8)
stem_filters = out_filters[0]
w = create_weight('w', [stem_kernel_size, 1, stem_filters])
x = tf.nn.conv1d(input, w, 1, 'SAME', data_format=self.data_format)
x = batch_norm1d(x, is_training, data_format=self.data_format)
layers.append(x)
self.layers.append(x)
else:
layers = [input]
start_idx = 0
for layer_id in range(self.num_layers):
with tf.variable_scope('layer_{0}'.format(layer_id)):
x = self._layer(arc_seq, layer_id, layers, start_idx, out_filters[layer_id], is_training)
if is_training:
dropout_placeholders.append(tf.placeholder_with_default((1 - self.keep_prob), shape=(), name=('dropout_%s' % layer_id)))
x = tf.nn.dropout(x, rate=dropout_placeholders[(- 1)])
if (layer_id == 0):
layers = [x]
else:
layers.append(x)
self.layers.append(x)
if ((self.with_skip_connection is True) and (layer_id in self.pool_layers)):
with tf.variable_scope('pool_at_{0}'.format(layer_id)):
pooled_layers = []
for (i, layer) in enumerate(layers):
if (self.train_fixed_arc and (self.skip_max_depth[i] < layer_id)):
print(('Not building pool_at_%i/from_%i because its max-depth=%i' % (layer_id, i, self.skip_max_depth[i])))
x = layer
else:
with tf.variable_scope('from_{0}'.format(i)):
x = self._refactorized_channels_for_skipcon(layer, out_filters[(layer_id + 1)], is_training)
pooled_layers.append(x)
layers = pooled_layers
start_idx += (1 + (layer_id * int(self.with_skip_connection)))
flatten_op = (self.stem_config['flatten_op'] if ('flatten_op' in self.stem_config) else 'flatten')
if ((flatten_op == 'global_avg_pool') or (flatten_op == 'gap')):
keras_data_format = ('channels_last' if self.data_format.endswith('C') else 'channels_first')
x = tf.keras.layers.GlobalAveragePooling1D(data_format=keras_data_format)(x)
elif (flatten_op == 'flatten'):
keras_data_format = ('channels_last' if self.data_format.endswith('C') else 'channels_first')
x = tf.keras.layers.Flatten(data_format=keras_data_format)(x)
else:
raise Exception(('cannot understand flatten_op: %s' % flatten_op))
self.layers.append(x)
if is_training:
dropout_placeholders.append(tf.placeholder_with_default((1 - self.keep_prob), shape=(), name='last_conv_dropout'))
x = tf.nn.dropout(x, rate=dropout_placeholders[(- 1)])
with tf.variable_scope('fc'):
fc_units = (self.stem_config['fc_units'] if ('fc_units' in self.stem_config) else 1000)
if ((flatten_op == 'global_avg_pool') or (flatten_op == 'gap')):
try:
inp_c = x.get_shape()[(- 1)].value
except AttributeError:
inp_c = x.get_shape()[(- 1)]
w = create_weight('w_fc', [inp_c, fc_units])
elif (flatten_op == 'flatten'):
try:
inp_c = np.prod(x.get_shape()[1:]).value
except AttributeError:
inp_c = np.prod(x.get_shape()[1:])
w = create_weight('w_fc', [inp_c, fc_units])
else:
raise Exception(('Unknown fc string: %s' % flatten_op))
b = create_bias('b_fc', shape=[fc_units])
x = (tf.matmul(x, w) + b)
x = tf.nn.relu(x)
if is_training:
dropout_placeholders.append(tf.placeholder_with_default((1 - self.keep_prob), shape=(), name='last_conv_dropout'))
x = tf.nn.dropout(x, rate=dropout_placeholders[(- 1)])
w_out = create_weight('w_out', [fc_units, self.output_node.Layer_attributes['units']])
b_out = create_bias('b_out', shape=[self.output_node.Layer_attributes['units']])
model_output = get_tf_layer(self.output_node.Layer_attributes['activation'])((tf.matmul(x, w_out) + b_out))
return (model_output, dropout_placeholders)
def _refactorized_channels_for_skipcon(self, layer, out_filters, is_training):
'For dealing with mismatched dimensions in skip connections: use a linear transformation (1x1 convolution followed by max-pooling)'
if (self.data_format == 'NWC'):
try:
inp_c = layer.get_shape()[(- 1)].value
except AttributeError:
inp_c = layer.get_shape()[(- 1)]
actual_data_format = 'channels_last'
elif (self.data_format == 'NCW'):
try:
inp_c = layer.get_shape()[1].value
except AttributeError:
inp_c = layer.get_shape()[1]
actual_data_format = 'channels_first'
with tf.variable_scope('path1_conv'):
w = create_weight('w', [1, inp_c, out_filters])
x = tf.nn.conv1d(layer, filters=w, stride=1, padding='SAME')
x = tf.layers.max_pooling1d(x, self.reduction_factor, self.reduction_factor, 'SAME', data_format=actual_data_format)
return x
def _layer(self, arc_seq, layer_id, prev_layers, start_idx, out_filters, is_training):
inputs = prev_layers[(- 1)]
if (self.data_format == 'NWC'):
try:
inp_w = inputs.get_shape()[1].value
inp_c = inputs.get_shape()[2].value
except AttributeError:
inp_w = inputs.get_shape()[1]
inp_c = inputs.get_shape()[2]
elif (self.data_format == 'NCW'):
try:
inp_c = inputs.get_shape()[1].value
inp_w = inputs.get_shape()[2].value
except AttributeError:
inp_c = inputs.get_shape()[1]
inp_w = inputs.get_shape()[2]
else:
raise Exception(('cannot understand data format: %s' % self.data_format))
count = arc_seq[start_idx]
branches = {}
strides = []
for i in range(len(self.model_space[layer_id])):
if (self.train_fixed_arc and (i != count)):
continue
with tf.variable_scope(('branch_%i' % i)):
if (self.model_space[layer_id][i].Layer_type == 'conv1d'):
y = self._conv_branch(inputs, layer_attr=self.model_space[layer_id][i].Layer_attributes, is_training=is_training)
branches[tf.equal(count, i)] = y
elif (self.model_space[layer_id][i].Layer_type == 'maxpool1d'):
y = self._pool_branch(inputs, 'max', layer_attr=self.model_space[layer_id][i].Layer_attributes, is_training=is_training)
branches[tf.equal(count, i)] = y
strides.append(self.model_space[layer_id][i].Layer_attributes['strides'])
elif (self.model_space[layer_id][i].Layer_type == 'avgpool1d'):
y = self._pool_branch(inputs, 'avg', layer_attr=self.model_space[layer_id][i].Layer_attributes, is_training=is_training)
branches[tf.equal(count, i)] = y
strides.append(self.model_space[layer_id][i].Layer_attributes['strides'])
elif (self.model_space[layer_id][i].Layer_type == 'identity'):
y = self._identity_branch(inputs)
branches[tf.equal(count, i)] = y
else:
raise Exception(('Unknown layer: %s' % self.model_space[layer_id][i]))
self.branches.append(branches)
if (len(strides) > 0):
assert (len(set(strides)) == 1), ('If you set strides!=1 (i.e. a reduction layer), then all candidate operations must have the same strides to keep the shape identical; got %s' % strides)
inp_w = int(np.ceil((inp_w / strides[0])))
if self.train_fixed_arc:
ks = list(branches.keys())
assert (len(ks) == 1)
out = branches[ks[0]]()
else:
out = tf.case(branches, default=(lambda : tf.constant(0, tf.float32, shape=[self.batch_size, inp_w, out_filters])), exclusive=True)
if (self.data_format == 'NWC'):
out.set_shape([None, inp_w, out_filters])
elif (self.data_format == 'NCW'):
out.set_shape([None, out_filters, inp_w])
if ((self.with_skip_connection is True) and (layer_id > 0)):
skip_start = (start_idx + 1)
skip = arc_seq[skip_start:(skip_start + layer_id)]
with tf.variable_scope('skip'):
res_layers = []
for i in range(layer_id):
if self.train_fixed_arc:
res_layers = [prev_layers[i] for i in range(len(skip)) if (skip[i] == 1)]
else:
res_layers.append(tf.cond(tf.equal(skip[i], 1), (lambda : prev_layers[i]), (lambda : tf.stop_gradient(tf.zeros_like(prev_layers[i])))))
res_layers.append(out)
out = tf.add_n(res_layers)
out = batch_norm1d(out, is_training, data_format=self.data_format)
return out
def _conv_branch(self, inputs, layer_attr, is_training):
kernel_size = layer_attr['kernel_size']
activation_fn = layer_attr['activation']
dilation = (layer_attr['dilation'] if ('dilation' in layer_attr) else 1)
filters = layer_attr['filters']
if (self.data_format == 'NWC'):
try:
inp_c = inputs.get_shape()[(- 1)].value
except AttributeError:
inp_c = inputs.get_shape()[(- 1)]
elif (self.data_format == 'NCW'):
try:
inp_c = inputs.get_shape()[1].value
except AttributeError:
inp_c = inputs.get_shape()[1]
w = create_weight('w', [kernel_size, inp_c, filters])
x = tf.nn.conv1d(inputs, filters=w, stride=1, padding='SAME', dilations=dilation)
x = batch_norm1d(x, is_training, data_format=self.data_format)
b = create_bias('b', shape=[1])
x = get_tf_layer(activation_fn)((x + b))
return (lambda : x)
def _pool_branch(self, inputs, avg_or_max, layer_attr, is_training):
pool_size = layer_attr['pool_size']
strides = layer_attr['strides']
filters = layer_attr['filters']
if (self.data_format == 'NWC'):
try:
inp_c = inputs.get_shape()[(- 1)].value
except AttributeError:
inp_c = inputs.get_shape()[(- 1)]
actual_data_format = 'channels_last'
elif (self.data_format == 'NCW'):
try:
inp_c = inputs.get_shape()[1].value
except AttributeError:
inp_c = inputs.get_shape()[1]
actual_data_format = 'channels_first'
else:
raise Exception(('Unknown data format: %s' % self.data_format))
if self.add_conv1_under_pool:
with tf.variable_scope('conv_1'):
w = create_weight('w', [1, inp_c, filters])
x = tf.nn.conv1d(inputs, w, 1, 'SAME', data_format=self.data_format)
x = batch_norm1d(x, is_training, data_format=self.data_format)
x = tf.nn.relu(x)
else:
x = inputs
with tf.variable_scope('pool'):
if (avg_or_max == 'avg'):
x = tf.layers.average_pooling1d(x, pool_size, strides, 'SAME', data_format=actual_data_format)
elif (avg_or_max == 'max'):
x = tf.layers.max_pooling1d(x, pool_size, strides, 'SAME', data_format=actual_data_format)
else:
raise ValueError('Unknown pool {}'.format(avg_or_max))
return (lambda : x)
def _identity_branch(self, inputs):
return (lambda : inputs)
|
class EnasConv1DwDataDescrption(EnasConv1dDAG):
'This is a modeler specified for convolutional networks with data description features\n Date: 2020.5.17\n '
def __init__(self, data_description, *args, **kwargs):
self.data_description = data_description
super().__init__(*args, **kwargs)
if (len(self.data_description.shape) < 2):
self.data_description = np.expand_dims(self.data_description, axis=0)
def _model(self, arc, **kwargs):
'\n Override the parent `_model` method to feed the data description to the controller when sampling architectures\n :param arc:\n :param kwargs:\n :return:\n '
if self.train_fixed_arc:
assert ((arc == self.fixed_arc) or (arc is None)), 'This DAG instance is built to train a fixed arc, hence you can only provide arc=None or arc=self.fixed_arc; check the initialization of this instance'
if (arc is None):
if self.train_fixed_arc:
model = EnasCnnModel(inputs=self.fixed_model_input, outputs=self.fixed_model_output, labels=self.fixed_model_label, arc_seq=arc, dag=self, session=self.session, name=self.name)
else:
model = EnasCnnModel(inputs=self.sample_model_input, outputs=self.sample_model_output, labels=self.sample_model_label, arc_seq=arc, dag=self, session=self.session, name=self.name, sample_dag_feed_dict={self.controller.data_descriptive_feature: self.data_description})
else:
model = EnasCnnModel(inputs=self.fixed_model_input, outputs=self.fixed_model_output, labels=self.fixed_model_label, arc_seq=arc, dag=self, session=self.session, name=self.name)
return model
|
class ModelBuilder():
'Scaffold of Model Builder\n '
def __init__(self, inputs, outputs, *args, **kwargs):
raise NotImplementedError('Abstract method.')
def __call__(self, model_states):
raise NotImplementedError('Abstract method.')
|
class DAGModelBuilder(ModelBuilder):
def __init__(self, inputs_op, output_op, model_space, model_compile_dict, num_layers=None, with_skip_connection=True, with_input_blocks=True, dag_func=None, *args, **kwargs):
if (type(inputs_op) not in (list, tuple)):
self.inputs_op = [inputs_op]
warnings.warn('inputs_op should be list-like; if only one input, try using ``[inputs_op]`` as argument', stacklevel=2)
else:
self.inputs_op = inputs_op
self.output_op = output_op
self.model_space = model_space
self.num_layers = (num_layers or len(self.model_space))
self.model_compile_dict = model_compile_dict
self.with_input_blocks = with_input_blocks
self.with_skip_connection = with_skip_connection
self.dag_func_ = dag_func
self.dag_func = (get_dag(dag_func) if (dag_func is not None) else DAG)
def __str__(self):
s = ('DAGModelBuilder with builder %s' % self.dag_func_)
return s
def __call__(self, arc_seq, *args, **kwargs):
input_nodes = self._get_input_nodes()
output_node = self._get_output_node()
dag = self.dag_func(*args, arc_seq=arc_seq, num_layers=self.num_layers, model_space=self.model_space, input_node=input_nodes, output_node=output_node, with_skip_connection=self.with_skip_connection, with_input_blocks=self.with_input_blocks, **kwargs)
try:
model = dag._build_dag()
model.compile(**self.model_compile_dict)
except ValueError:
print(arc_seq)
raise Exception('above')
return model
def _get_input_nodes(self):
input_nodes = []
for node_op in self.inputs_op:
node = ComputationNode(node_op, node_name=node_op.Layer_attributes['name'])
input_nodes.append(node)
return input_nodes
def _get_output_node(self):
if (type(self.output_op) is list):
raise Exception('DAG currently does not accept output_op in List')
output_node = ComputationNode(self.output_op, node_name='output')
return output_node
|
class EnasAnnModelBuilder(DAGModelBuilder):
'This class builds an artificial neural network (ANN).\n\n It uses the tensorflow low-level API to define one big graph, in which\n each child network architecture is a subgraph of this big DAG.\n\n Parameters\n ----------\n session\n controller\n dag_func\n l1_reg\n l2_reg\n with_output_blocks\n use_node_dag\n feature_model\n dag_kwargs\n args\n kwargs\n '
def __init__(self, session=None, controller=None, dag_func='EnasAnnDAG', l1_reg=0.0, l2_reg=0.0, with_output_blocks=False, use_node_dag=True, feature_model=None, dag_kwargs=None, *args, **kwargs):
super().__init__(*args, dag_func=dag_func, **kwargs)
assert (dag_func.lower() in ('enas', 'enasanndag')), 'EnasAnnModelBuilder only supports EnasAnnDAG.'
self.session = session
self.controller = controller
self.l1_reg = float(l1_reg)
self.l2_reg = float(l2_reg)
self.node_dag = None
self.use_node_dag = use_node_dag
self.feature_model = feature_model
self.dag_kwargs = (dag_kwargs or {})
self.with_output_blocks = with_output_blocks
assert (not (self.with_output_blocks and self.use_node_dag)), 'Currently `use_node_dag` is incompatible with `with_output_blocks`'
self._build_dag()
def _build_dag(self):
'\n Args:\n *args:\n **kwargs: When needed, `feature_model` will be passed through kwargs to the DAG\n\n Returns:\n\n '
self.dag = self.dag_func(model_space=self.model_space, input_node=self.inputs_op, output_node=self.output_op, with_input_blocks=self.with_input_blocks, with_skip_connection=self.with_skip_connection, with_output_blocks=self.with_output_blocks, session=self.session, model_compile_dict=self.model_compile_dict, l1_reg=self.l1_reg, l2_reg=self.l2_reg, controller=self.controller, feature_model=self.feature_model, **self.dag_kwargs)
def __call__(self, arc_seq=None, *args, **kwargs):
input_nodes = self._get_input_nodes()
output_node = self._get_output_node()
if self.use_node_dag:
self.node_dag = (lambda x: get_dag('InputBlockDAG')(arc_seq=x, num_layers=self.num_layers, model_space=self.model_space, input_node=input_nodes, output_node=output_node, with_skip_connection=self.with_skip_connection, with_input_blocks=self.with_input_blocks))
model = self.dag(arc_seq, node_builder=self.node_dag)
model.compile(**self.model_compile_dict)
return model
def set_controller(self, controller):
self.dag.set_controller(controller)
def _get_output_node(self):
if (type(self.output_op) is list):
output_node = [ComputationNode(self.output_op[i], node_name=self.output_op[i].Layer_attributes['name']) for i in range(len(self.output_op))]
else:
output_node = [ComputationNode(self.output_op, node_name='output')]
return output_node
|
class EnasCnnModelBuilder(DAGModelBuilder):
def __init__(self, session=None, controller=None, dag_func='EnasConv1DDAG', l1_reg=0.0, l2_reg=0.0, batch_size=None, dag_kwargs=None, *args, **kwargs):
'\n Args:\n session:\n controller:\n dag_func:\n l1_reg:\n l2_reg:\n batch_size:\n dag_kwargs:\n *args:\n **kwargs:\n '
super().__init__(*args, dag_func=dag_func, **kwargs)
self.session = session
self.controller = controller
self.l1_reg = float(l1_reg)
self.l2_reg = float(l2_reg)
self.batch_size = (batch_size or 128)
self.dag_kwargs = (dag_kwargs or {})
self._build_dag()
assert issubclass(type(self.dag), EnasConv1dDAG), 'EnasCnnModelBuilder only supports EnasConv1dDAG and its derivatives'
def _build_dag(self):
'\n Args:\n *args:\n **kwargs: When needed, `feature_model` will be passed through kwargs to the DAG\n\n Returns:\n\n '
self.dag = self.dag_func(model_space=self.model_space, input_node=self.inputs_op, output_node=self.output_op, session=self.session, model_compile_dict=self.model_compile_dict, l1_reg=self.l1_reg, l2_reg=self.l2_reg, controller=self.controller, batch_size=self.batch_size, **self.dag_kwargs)
def __call__(self, arc_seq=None, *args, **kwargs):
model = self.dag(arc_seq, **kwargs)
model.compile(**self.model_compile_dict)
return model
def set_controller(self, controller):
self.dag.set_controller(controller)
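# Hedged usage sketch for EnasCnnModelBuilder (added for illustration; not part of the
# original source). It assumes a TF1-style session, a GeneralController and a conv1d
# model space already exist, as in the unit tests further below; the Operation
# attributes shown are assumptions for a toy input of shape (1000, 4).
def _example_enas_cnn_model_builder(session, controller, model_space):
    builder = EnasCnnModelBuilder(
        session=session,
        controller=controller,
        model_space=model_space,
        inputs_op=[Operation('input', shape=(1000, 4))],
        output_op=Operation('dense', units=1, activation='sigmoid'),
        model_compile_dict={'loss': 'binary_crossentropy', 'optimizer': 'adam'},
        batch_size=32)
    # arc_seq=None lets the underlying EnasConv1dDAG sample an architecture from the controller
    return builder(arc_seq=None)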
|
class KerasModelBuilder(ModelBuilder):
def __init__(self, inputs_op, output_op, model_compile_dict, model_space=None, gpus=None, **kwargs):
self.model_compile_dict = model_compile_dict
self.input_node = inputs_op
self.output_node = output_op
self.model_space = model_space
self.gpus = gpus
def __call__(self, model_states):
if ((self.gpus is None) or (self.gpus == 1)):
model = build_sequential_model(model_states=model_states, input_state=self.input_node, output_state=self.output_node, model_compile_dict=self.model_compile_dict, model_space=self.model_space)
elif (type(self.gpus) is int):
model = build_multi_gpu_sequential_model(model_states=model_states, input_state=self.input_node, output_state=self.output_node, model_compile_dict=self.model_compile_dict, model_space=self.model_space, gpus=self.gpus)
elif (type(self.gpus) is list):
mirrored_strategy = tf.distribute.MirroredStrategy(devices=self.gpus)
with mirrored_strategy.scope():
model = build_sequential_model(model_states=model_states, input_state=self.input_node, output_state=self.output_node, model_compile_dict=self.model_compile_dict, model_space=self.model_space)
return model
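# Hedged usage sketch for KerasModelBuilder (added for illustration; not part of the
# original source). The Operation attributes and the architecture below are
# assumptions; the point is that the builder is constructed once and then called with
# one integer per layer, each indexing into that layer's candidate operations.
def _example_keras_model_builder(model_space):
    builder = KerasModelBuilder(
        inputs_op=Operation('input', shape=(100, 4)),
        output_op=Operation('dense', units=1, activation='sigmoid'),
        model_compile_dict={'loss': 'binary_crossentropy', 'optimizer': 'adam'},
        model_space=model_space)
    arc = [0] * len(model_space)  # pick the first candidate operation in every layer
    return builder(model_states=arc)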
|
class KerasBranchModelBuilder(ModelBuilder):
def __init__(self, inputs_op, output_op, model_compile_dict, model_space=None, with_bn=False, **kwargs):
assert (type(model_space) is BranchedModelSpace)
assert (len(inputs_op) == len(model_space.subspaces[0]))
self.inputs_op = inputs_op
self.output_op = output_op
self.model_space = model_space
self.model_compile_dict = model_compile_dict
self.with_bn = with_bn
self._branch_to_layer = self.model_space.branch_to_layer
def _build_branch(self, input_op, model_states, model_space):
if issubclass(type(input_op), Operation):
inp = get_layer(None, input_op)
else:
inp = input_op
x = inp
assert (len(model_states) > 0)
for (i, state) in enumerate(model_states):
if issubclass(type(state), Operation):
x = get_layer(x, state)
elif (issubclass(type(state), int) or np.issubclass_(type(state), np.integer)):
assert (model_space is not None), 'if provided integer model_arc, must provide model_space in kwargs'
x = get_layer(x, model_space[i][state], with_bn=self.with_bn)
else:
raise Exception(('cannot understand %s of type %s' % (state, type(state))))
return (inp, x)
def __call__(self, model_states, **kwargs):
inps = []
branches = []
for i in range(len(self.inputs_op)):
(inp, out) = self._build_branch(input_op=self.inputs_op[i], model_states=[model_states[j] for j in self._branch_to_layer[(0, i)]], model_space=self.model_space.subspaces[0][i])
inps.append(inp)
branches.append(out)
if (self.model_space.concat_op == 'concatenate'):
branch_merge = get_layer(x=branches, state=Operation('concatenate'))
else:
raise ValueError(('Model builder cannot understand model space concat op: %s' % self.model_space.concat_op))
(_, h) = self._build_branch(input_op=branch_merge, model_states=[model_states[j] for j in self._branch_to_layer[(1, None)]], model_space=self.model_space.subspaces[1])
out = get_layer(x=h, state=self.output_op)
model = Model(inputs=inps, outputs=out)
model.compile(**self.model_compile_dict)
return model
|
class KerasResidualCnnBuilder(ModelBuilder):
"Function class for converting an architecture sequence tokens to a Keras model\n\n Parameters\n ----------\n inputs_op : amber.architect.modelSpace.Operation\n output_op : amber.architect.modelSpace.Operation\n fc_units : int\n number of units in the fully-connected layer\n flatten_mode : {'GAP', 'Flatten'}\n the flatten mode to convert conv layers to fully-connected layers.\n model_compile_dict : dict\n model_space : amber.architect.modelSpace.ModelSpace\n dropout_rate : float\n dropout rate, must be 0<dropout_rate<1\n wsf : int\n width scale factor\n "
def __init__(self, inputs_op, output_op, fc_units, flatten_mode, model_compile_dict, model_space, dropout_rate=0.2, wsf=1, add_conv1_under_pool=True, verbose=1, **kwargs):
self.model_compile_dict = model_compile_dict
self.inputs = inputs_op
self.outputs = output_op
self.fc_units = fc_units
self.verbose = verbose
assert (flatten_mode.lower() in {'gap', 'flatten'}), ('Unknown flatten mode: %s' % flatten_mode)
self.flatten_mode = flatten_mode.lower()
self.model_space = model_space
self.dropout_rate = dropout_rate
self.wsf = wsf
self.add_conv1_under_pool = add_conv1_under_pool
self.decoder = ResConvNetArchitecture(model_space=model_space)
def __call__(self, model_states):
model = self._convert(model_states, verbose=self.verbose)
if (model is not None):
model.compile(**self.model_compile_dict)
return model
def _convert(self, arc_seq, verbose=True):
(out_filters, pool_layers) = self.get_out_filters(self.model_space)
inp = get_layer(x=None, state=self.inputs)
stem_conv = Operation('conv1d', kernel_size=8, filters=out_filters[0], activation='linear')
x = self.res_layer(stem_conv, self.wsf, inp, name='stem_conv', add_conv1_under_pool=self.add_conv1_under_pool)
start_idx = 0
layers = []
for layer_id in range(len(self.model_space)):
if verbose:
print(('start_idx=%i, layer id=%i, out_filters=%i x %i' % (start_idx, layer_id, out_filters[layer_id], self.wsf)))
count = arc_seq[start_idx]
this_layer = self.model_space[layer_id][count]
if verbose:
print(this_layer)
if (layer_id == 0):
x = self.res_layer(this_layer, self.wsf, x, name=('L%i' % layer_id), add_conv1_under_pool=self.add_conv1_under_pool)
else:
x = self.res_layer(this_layer, self.wsf, layers[(- 1)], name=('L%i' % layer_id), add_conv1_under_pool=self.add_conv1_under_pool)
if (layer_id > 0):
skip = arc_seq[(start_idx + 1):((start_idx + layer_id) + 1)]
skip_layers = [layers[i] for i in range(len(layers)) if (skip[i] == 1)]
if verbose:
print(('skip=%s' % skip))
if len(skip_layers):
skip_layers.append(x)
x = Add(name=('L%i_resAdd' % layer_id))(skip_layers)
x = BatchNormalization(name=('L%i_resBn' % layer_id))(x)
if (self.dropout_rate != 0):
x = Dropout(self.dropout_rate, name=('L%i_dropout' % layer_id))(x)
layers.append(x)
if (layer_id in pool_layers):
pooled_layers = []
for (i, layer) in enumerate(layers):
pooled_layers.append(self.factorized_reduction_layer(layer, (out_filters[(layer_id + 1)] * self.wsf), name=('pool_at_%i_from_%i' % (layer_id, i))))
if verbose:
print(('pooled@%i, %s' % (layer_id, pooled_layers)))
layers = pooled_layers
start_idx += (1 + layer_id)
if verbose:
print(('-' * 80))
if (self.flatten_mode == 'gap'):
x = GlobalAveragePooling1D()(x)
elif (self.flatten_mode == 'flatten'):
x = Flatten()(x)
else:
raise Exception(('Unknown flatten mode: %s' % self.flatten_mode))
if (self.dropout_rate != 0):
x = Dropout(self.dropout_rate)(x)
x = Dense(units=self.fc_units, activation='relu')(x)
out = get_layer(x=x, state=self.outputs)
model = Model(inputs=inp, outputs=out)
return model
@staticmethod
def factorized_reduction_layer(inp, out_filter, name, reduction_factor=4):
x = Conv1D(out_filter, kernel_size=1, strides=1, kernel_initializer='he_normal', use_bias=False, padding='same', name=name)(inp)
x = MaxPooling1D(pool_size=reduction_factor, strides=reduction_factor, padding='same')(x)
return x
@staticmethod
def res_layer(layer, width_scale_factor, inputs, l2_reg=5e-07, name='layer', add_conv1_under_pool=True):
if (layer.Layer_type == 'conv1d'):
activation = layer.Layer_attributes['activation']
num_filters = (width_scale_factor * layer.Layer_attributes['filters'])
kernel_size = layer.Layer_attributes['kernel_size']
if ('dilation' in layer.Layer_attributes):
dilation = layer.Layer_attributes['dilation']
else:
dilation = 1
x = Conv1D(num_filters, kernel_size=kernel_size, strides=1, padding='same', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(l2_reg), kernel_constraint=constraints.max_norm(0.9), use_bias=False, name=(('%s_conv' % name) if (dilation == 1) else ('%s_conv_d%i' % (name, dilation))), dilation_rate=dilation)(inputs)
x = BatchNormalization(name=('%s_bn' % name))(x)
if (activation in ('None', 'linear')):
pass
elif (activation in ('relu', 'sigmoid', 'tanh', 'softmax', 'elu')):
x = Activation(activation, name=('%s_%s' % (name, activation)))(x)
elif (activation == 'leaky_relu'):
x = LeakyReLU(alpha=0.2, name=('%s_%s' % (name, activation)))(x)
else:
raise Exception(('Unknown activation: %s' % activation))
elif ((layer.Layer_type == 'maxpool1d') or (layer.Layer_type == 'avgpool1d')):
num_filters = (width_scale_factor * layer.Layer_attributes['filters'])
pool_size = layer.Layer_attributes['pool_size']
if add_conv1_under_pool:
x = Conv1D(num_filters, kernel_size=1, strides=1, padding='same', kernel_initializer='he_normal', use_bias=False, name=('%s_maxpool_conv' % name))(inputs)
x = BatchNormalization(name=('%s_bn' % name))(x)
x = Activation('relu', name=('%s_relu' % name))(x)
else:
x = inputs
if (layer.Layer_type == 'maxpool1d'):
x = MaxPooling1D(pool_size=pool_size, strides=1, padding='same', name=('%s_maxpool' % name))(x)
elif (layer.Layer_type == 'avgpool1d'):
x = AveragePooling1D(pool_size=pool_size, strides=1, padding='same', name=('%s_avgpool' % name))(x)
else:
raise Exception(('Unknown pool: %s' % layer.Layer_type))
elif (layer.Layer_type == 'identity'):
x = Lambda((lambda t: t), name=('%s_id' % name))(inputs)
else:
raise Exception(('Unknown type: %s' % layer.Layer_type))
return x
@staticmethod
def get_out_filters(model_space):
out_filters = []
pool_layers = []
for layer_id in range(len(model_space)):
layer = model_space[layer_id]
this_out_filters = [l.Layer_attributes['filters'] for l in layer]
assert (len(set(this_out_filters)) == 1), ('EnasConv1dDAG only supports one identical number of filters per layer, but found %i in layer %s' % (len(set(this_out_filters)), layer))
if (len(out_filters) and (this_out_filters[0] != out_filters[(- 1)])):
pool_layers.append((layer_id - 1))
out_filters.append(this_out_filters[0])
return (out_filters, pool_layers)
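# Hedged usage sketch for KerasResidualCnnBuilder (added for illustration; not part of
# the original source). The model space is assumed to be a conv1d space in which every
# operation of a layer shares the same filter number, as required by get_out_filters
# above; the token layout follows _convert: one operation index per layer, then
# layer_id skip-connection bits for each layer after the first.
def _example_residual_cnn_builder(model_space):
    builder = KerasResidualCnnBuilder(
        inputs_op=Operation('input', shape=(1000, 4)),
        output_op=Operation('dense', units=1, activation='sigmoid'),
        fc_units=32,
        flatten_mode='gap',
        model_compile_dict={'loss': 'binary_crossentropy', 'optimizer': 'adam'},
        model_space=model_space,
        verbose=0)
    # e.g. for a 3-layer space: [op_0, op_1, skip(1<-0), op_2, skip(2<-0), skip(2<-1)]
    arc = [0, 0, 1, 0, 0, 1]
    return builder(arc)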
|
class KerasMultiIOModelBuilder(ModelBuilder):
'\n Note:\n Still not working if num_outputs=0\n '
def __init__(self, inputs_op, output_op, model_compile_dict, model_space, with_input_blocks, with_output_blocks, dropout_rate=0.2, wsf=1, **kwargs):
self.model_compile_dict = model_compile_dict
self.inputs = inputs_op
self.outputs = output_op
self.model_space = model_space
self.num_inputs = (len(inputs_op) if (type(inputs_op) in (list, tuple)) else 0)
self.num_outputs = (len(output_op) if (type(output_op) in (list, tuple)) else 0)
assert (not ((self.num_inputs == 0) and (self.num_outputs == 0))), 'MultiIO cannot have a single input and a single output at the same time'
self.with_input_blocks = with_input_blocks
self.with_output_blocks = with_output_blocks
if self.with_input_blocks:
assert (self.num_inputs > 0), 'you specified with_input_blocks=True for KerasMultiIOModelBuilder, but only provided a single input'
self.decoder = MultiIOArchitecture(num_layers=len(self.model_space), num_inputs=(self.num_inputs * self.with_input_blocks), num_outputs=(self.num_outputs * self.with_output_blocks))
def __call__(self, model_states):
model = self._convert(model_states)
if (model is not None):
model.compile(**self.model_compile_dict)
return model
def _convert(self, arc_seq, with_bn=True, wsf=1):
inputs = ([get_layer(x=None, state=x) for x in self.inputs] if (self.num_inputs > 0) else [get_layer(x=None, state=self.inputs)])
(op, inp, skp, out) = self.decoder.decode(arc_seq)
out_rowsum = np.apply_along_axis(np.sum, 1, out)
out_colsum = np.apply_along_axis(np.sum, 0, out)
skp_rowsum = np.array(([1] + [sum(x) for x in skp]))
with_input_blocks = self.with_input_blocks
if any((out_rowsum == 0)):
print('invalid model: unconnected output')
return None
if ((self.with_input_blocks is False) and any(((skp_rowsum == 0) & (out_colsum != 0)))):
print('invalid model: output connected to layer with no input')
return None
prev_layers = []
for layer_id in range(len(self.model_space)):
this_op = op[layer_id]
if with_input_blocks:
this_inputs = [inputs[i] for i in np.where(inp[layer_id])[0]]
else:
this_inputs = (inputs if (layer_id == 0) else [])
if (layer_id > 0):
this_inputs += [prev_layers[i] for i in np.where(skp[(layer_id - 1)])[0] if (prev_layers[i] is not None)]
model_op = copy.deepcopy(self.model_space[layer_id][this_op])
if ('units' in model_op.Layer_attributes):
model_op.Layer_attributes['units'] *= wsf
elif ('filters' in model_op.Layer_attributes):
model_op.Layer_attributes['filters'] *= wsf
else:
raise Exception('Cannot use wsf')
if (len(this_inputs) > 1):
input_tensor = Concatenate()(this_inputs)
layer = get_layer(x=input_tensor, state=model_op, with_bn=with_bn)
prev_layers.append(layer)
elif (len(this_inputs) == 1):
input_tensor = this_inputs[0]
layer = get_layer(x=input_tensor, state=model_op, with_bn=with_bn)
prev_layers.append(layer)
else:
prev_layers.append(None)
outputs_inputs = []
for (m, o) in enumerate(out):
idx = [i for i in np.where(o)[0] if (prev_layers[i] is not None)]
if (len(idx) > 1):
outputs_inputs.append(Concatenate()([prev_layers[i] for i in idx]))
elif (len(idx) == 1):
outputs_inputs.append(prev_layers[idx[0]])
else:
print(('Secondary unconnected output %i' % m))
return None
outputs = [get_layer(x=outputs_inputs[i], state=self.outputs[i]) for i in range(self.num_outputs)]
model = Model(inputs=inputs, outputs=outputs)
return model
|
def build_sequential_model(model_states, input_state, output_state, model_compile_dict, **kwargs):
"\n Parameters\n ----------\n model_states: a list of _operators sampled from operator space\n input_state:\n output_state: specifies the output tensor, e.g. Dense(1, activation='sigmoid')\n model_compile_dict: a dict of `loss`, `optimizer` and `metrics`\n\n Returns\n ---------\n Keras.Model\n "
inp = get_layer(None, input_state)
x = inp
model_space = kwargs.pop('model_space', None)
for (i, state) in enumerate(model_states):
if issubclass(type(state), Operation):
x = get_layer(x, state)
elif (issubclass(type(state), int) or np.issubclass_(type(state), np.integer)):
assert (model_space is not None), 'if provided integer model_arc, must provide model_space in kwargs'
x = get_layer(x, model_space[i][state])
else:
raise Exception(('cannot understand %s of type %s' % (state, type(state))))
out = get_layer(x, output_state)
model = Model(inputs=inp, outputs=out)
if (not kwargs.pop('stop_compile', False)):
model.compile(**model_compile_dict)
return model
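# Hedged usage sketch for build_sequential_model (added for illustration; not part of
# the original source). The Operation attributes below are assumptions; the function
# can also take integer tokens instead of Operations when model_space is supplied.
def _example_build_sequential_model():
    input_state = Operation('input', shape=(100, 4))
    output_state = Operation('dense', units=1, activation='sigmoid')
    model_states = [
        Operation('conv1d', filters=16, kernel_size=8, activation='relu'),
        Operation('flatten'),
        Operation('dense', units=32, activation='relu')]
    model_compile_dict = {'loss': 'binary_crossentropy', 'optimizer': 'adam'}
    return build_sequential_model(model_states, input_state, output_state, model_compile_dict)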
|
def build_multi_gpu_sequential_model(model_states, input_state, output_state, model_compile_dict, gpus=4, **kwargs):
try:
from tensorflow.keras.utils import multi_gpu_model
except Exception as e:
raise Exception(('multi-GPU models are not supported by this keras/tensorflow version; check your installation. Error: %s' % e))
with tf.device('/cpu:0'):
vanilla_model = build_sequential_model(model_states, input_state, output_state, model_compile_dict, stop_compile=True, **kwargs)
model = multi_gpu_model(vanilla_model, gpus=gpus)
model.compile(**model_compile_dict)
return model
|
def build_sequential_model_from_string(model_states_str, input_state, output_state, state_space, model_compile_dict):
'build a sequential model from a string of states\n '
assert (len(model_states_str) == len(state_space))
str_to_state = [[str(state) for state in state_space[i]] for i in range(len(state_space))]
try:
model_states = [state_space[i][str_to_state[i].index(model_states_str[i])] for i in range(len(state_space))]
except ValueError:
raise Exception('model_states_str not found in state-space')
return build_sequential_model(model_states, input_state, output_state, model_compile_dict)
|
def build_multi_gpu_sequential_model_from_string(model_states_str, input_state, output_state, state_space, model_compile_dict):
'build a sequential model from a string of states\n '
assert (len(model_states_str) == len(state_space))
str_to_state = [[str(state) for state in state_space[i]] for i in range(len(state_space))]
try:
model_states = [state_space[i][str_to_state[i].index(model_states_str[i])] for i in range(len(state_space))]
except ValueError:
raise Exception('model_states_str not found in state-space')
return build_multi_gpu_sequential_model(model_states, input_state, output_state, model_compile_dict)
|
def compare_motif_diff_size(P, Q):
'Find the best match between two matrices\n P and Q of different sizes\n '
best_d = float('inf')
P_half_len = int(np.ceil((P.shape[0] / 2.0)))
Q_pad = np.concatenate([(np.ones((P_half_len, Q.shape[1])) / Q.shape[1]), Q, (np.ones((P_half_len, Q.shape[1])) / Q.shape[1])], axis=0)
for i in range(0, ((Q_pad.shape[0] - P.shape[0]) + 1)):
d = multinomial_KL_divergence(P, Q_pad[i:(i + len(P))])
if (d < best_d):
best_d = d
best_d /= float(P.shape[0])
return best_d
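# Hedged example for compare_motif_diff_size (added for illustration; not part of the
# original source). P and Q are position probability matrices (rows = positions,
# columns = A/C/G/T); Q is padded with uniform rows, P is slid along it, and the
# smallest per-position KL divergence over all offsets is returned.
def _example_compare_motif_diff_size():
    P = np.array([[0.97, 0.01, 0.01, 0.01],
                  [0.01, 0.97, 0.01, 0.01]])
    Q = np.array([[0.25, 0.25, 0.25, 0.25],
                  [0.94, 0.02, 0.02, 0.02],
                  [0.02, 0.94, 0.02, 0.02],
                  [0.25, 0.25, 0.25, 0.25]])
    # small value, since P aligns well with the middle of Q
    return compare_motif_diff_size(P, Q)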
|
def remove_dup_motif(motif_dict, threshold=0.05):
tmp = {}
for motif_name in motif_dict:
this_motif = motif_dict[motif_name]
best_d = 100
for (_, ref_motif) in tmp.items():
new_d = compare_motif_diff_size(this_motif, ref_motif)
if (new_d < best_d):
best_d = new_d
if (best_d <= threshold):
continue
else:
tmp[motif_name] = this_motif
return tmp
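# Hedged example for remove_dup_motif (added for illustration; not part of the original
# source). A motif is dropped when its best alignment distance to an already-kept motif
# is at or below the threshold, so an exact duplicate is removed here.
def _example_remove_dup_motif():
    a = np.array([[0.9, 0.04, 0.03, 0.03],
                  [0.05, 0.9, 0.03, 0.02]])
    motif_dict = {'motif_a': a, 'motif_a_copy': a.copy()}
    return remove_dup_motif(motif_dict, threshold=0.05)  # keeps only 'motif_a'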
|
def make_output_annot(label_annot, cat_list):
split_vec = []
for x in cat_list:
if (type(x) is str):
split_vec.append(np.where((label_annot.category == x))[0])
elif (type(x) in (list, tuple)):
split_vec.append([i for i in range(label_annot.shape[0]) if (label_annot.category[i] in x)])
else:
raise Exception(('Unknown category: %s' % x))
output_annot = []
for i in range(len(cat_list)):
for j in range(len(split_vec[i])):
k = split_vec[i][j]
output_annot.append({'block': i, 'index': j, 'label_category': label_annot.iloc[k].category, 'label_name': ('%s_%s' % (label_annot.iloc[k].target, label_annot.iloc[k].cell)), 'label_index': k})
return output_annot
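# Hedged example for make_output_annot (added for illustration; not part of the
# original source). label_annot is assumed to be a DataFrame with 'category', 'target'
# and 'cell' columns; cat_list groups categories into output blocks, and each entry of
# the returned list records the block, the within-block index and the original row.
def _example_make_output_annot():
    label_annot = pd.DataFrame({
        'category': ['TF', 'TF', 'Histone'],
        'target': ['CTCF', 'CEBPB', 'H3K27ac'],
        'cell': ['K562', 'HepG2', 'K562']})
    return make_output_annot(label_annot, cat_list=['TF', 'Histone'])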
|
def basewise_bits(motif):
epsilon = 0.001
assert (motif.shape[1] == 4)
ent = np.apply_along_axis((lambda x: (- np.sum((x * np.log2(np.clip(x, epsilon, (1 - epsilon))))))), 1, motif)
return ent
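# Hedged example for basewise_bits (added for illustration; not part of the original
# source). It returns the per-position Shannon entropy in bits: a near-deterministic
# position scores close to 0 and a uniform position scores 2.
def _example_basewise_bits():
    motif = np.array([[0.97, 0.01, 0.01, 0.01],
                      [0.25, 0.25, 0.25, 0.25]])
    return basewise_bits(motif)  # approximately [0.24, 2.0]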
|
def group_motif_by_factor(motif_dict, output_factors, threshold=0.1, pure_only=True):
new_dict = defaultdict(dict)
black_list_factors = ['CTCF', 'NFKB']
for motif_name in motif_dict:
factor = motif_name.split('_')[0]
if (not (factor in output_factors)):
continue
if (factor in black_list_factors):
continue
if pure_only:
ent = basewise_bits(motif_dict[motif_name])
ENT_CUTOFF = 0.5
MIN_ENT_BASES = 5
if (np.sum((ent < ENT_CUTOFF)) < MIN_ENT_BASES):
continue
new_dict[factor].update({motif_name: motif_dict[motif_name]})
for factor in new_dict:
new_dict[factor] = remove_dup_motif(new_dict[factor], threshold=threshold)
return new_dict
|
def get_seq_chunks(idx, min_cont=5, max_gap=3):
intv = (idx[1:] - idx[:(- 1)])
break_points = np.concatenate([[(- 1)], np.where((intv > max_gap))[0], [(idx.shape[0] - 1)]])
if (len(break_points) <= 3):
return []
chunks = []
for i in range(0, (len(break_points) - 1)):
tmp = idx[[(break_points[i] + 1), break_points[(i + 1)]]]
if ((tmp[1] - tmp[0]) > min_cont):
chunks.append(tmp)
return chunks
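# Hedged example for get_seq_chunks (added for illustration; not part of the original
# source). Given sorted positional indices, runs separated by gaps larger than max_gap
# are split apart, and only runs spanning more than min_cont positions are returned as
# [start, end] index pairs.
def _example_get_seq_chunks():
    idx = np.array([1, 2, 3, 4, 5, 6, 7, 8, 30, 31, 40, 41, 42, 43, 44, 45, 46, 47])
    return get_seq_chunks(idx, min_cont=5, max_gap=3)  # chunks [1, 8] and [40, 47]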
|
def convert_ppm_to_pwm(m, eps=0.001):
m = np.clip(m, eps, (1 - eps))
m_ = (np.log2(m) - np.log2(0.25))
return m_
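# Hedged example for convert_ppm_to_pwm (added for illustration; not part of the
# original source). Probabilities are clipped away from 0 and 1, then converted to
# log2 odds against the uniform 0.25 background, so 1.0 maps to roughly +2 and a
# probability near 0 maps to roughly -8 with the default eps.
def _example_convert_ppm_to_pwm():
    ppm = np.array([[1.0, 0.0, 0.0, 0.0],
                    [0.25, 0.25, 0.25, 0.25]])
    return convert_ppm_to_pwm(ppm)  # rows ~ [+2, -8, -8, -8] and [0, 0, 0, 0]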
|
def create_conv_from_motif(input_ph, motifs, factor_name='None'):
max_motif_len = max([m.shape[0] for m in motifs.values()])
new_motifs = []
for m in motifs.values():
left = ((max_motif_len - m.shape[0]) // 2)
right = ((max_motif_len - m.shape[0]) - left)
m_ = np.concatenate([(np.ones((left, 4)) / 4.0), m, (np.ones((right, 4)) / 4.0)])
m_ = convert_ppm_to_pwm(m_)
new_motifs.append(m_)
w = np.stack(new_motifs).transpose([1, 2, 0])
with tf.variable_scope(factor_name, reuse=tf.AUTO_REUSE):
conv_w = tf.Variable(w, dtype=tf.float64, name='conv_w')
conv_out = tf.nn.conv1d(input_ph, filters=conv_w, stride=1, padding='SAME', name='conv_out')
conv_out = tf.reduce_max(conv_out, axis=(- 1))
return (conv_out, conv_w)
|
def _index_to_label(index):
'Convert a pandas index or multiindex to an axis label.'
if isinstance(index, pd.MultiIndex):
return '-'.join(map(to_utf8, index.names))
else:
return index.name
|
def _index_to_ticklabels(index):
'Convert a pandas index or multiindex into ticklabels.'
if isinstance(index, pd.MultiIndex):
return ['-'.join(map(to_utf8, i)) for i in index.values]
else:
return index.values
|
def _matrix_mask(data, mask):
'Ensure that data and mask are compatible and add missing values.\n\n Values will be plotted for cells where ``mask`` is ``False``.\n\n ``data`` is expected to be a DataFrame; ``mask`` can be an array or\n a DataFrame.\n\n '
if (mask is None):
mask = np.zeros(data.shape, np.bool)
if isinstance(mask, np.ndarray):
if (mask.shape != data.shape):
raise ValueError('Mask must have the same shape as data.')
mask = pd.DataFrame(mask, index=data.index, columns=data.columns, dtype=np.bool)
elif isinstance(mask, pd.DataFrame):
if (not (mask.index.equals(data.index) and mask.columns.equals(data.columns))):
err = 'Mask must have the same index and columns as data.'
raise ValueError(err)
mask = (mask | pd.isnull(data))
return mask
|
class _HeatMapper2(object):
'Draw a heatmap plot of a matrix with nice labels and colormaps.'
def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt, annot_kws, cellsize, cellsize_vmax, cbar, cbar_kws, xticklabels=True, yticklabels=True, mask=None, ax_kws=None, rect_kws=None):
'Initialize the plotting object.'
if isinstance(data, pd.DataFrame):
plot_data = data.values
else:
plot_data = np.asarray(data)
data = pd.DataFrame(plot_data)
mask = _matrix_mask(data, mask)
plot_data = np.ma.masked_where(np.asarray(mask), plot_data)
xtickevery = 1
if isinstance(xticklabels, int):
xtickevery = xticklabels
xticklabels = _index_to_ticklabels(data.columns)
elif (xticklabels is True):
xticklabels = _index_to_ticklabels(data.columns)
elif (xticklabels is False):
xticklabels = []
ytickevery = 1
if isinstance(yticklabels, int):
ytickevery = yticklabels
yticklabels = _index_to_ticklabels(data.index)
elif (yticklabels is True):
yticklabels = _index_to_ticklabels(data.index)
elif (yticklabels is False):
yticklabels = []
(nx, ny) = data.T.shape
if (not len(xticklabels)):
self.xticks = []
self.xticklabels = []
elif (isinstance(xticklabels, string_types) and (xticklabels == 'auto')):
self.xticks = 'auto'
self.xticklabels = _index_to_ticklabels(data.columns)
else:
(self.xticks, self.xticklabels) = self._skip_ticks(xticklabels, xtickevery)
if (not len(yticklabels)):
self.yticks = []
self.yticklabels = []
elif (isinstance(yticklabels, string_types) and (yticklabels == 'auto')):
self.yticks = 'auto'
self.yticklabels = _index_to_ticklabels(data.index)
else:
(self.yticks, self.yticklabels) = self._skip_ticks(yticklabels, ytickevery)
xlabel = _index_to_label(data.columns)
ylabel = _index_to_label(data.index)
self.xlabel = (xlabel if (xlabel is not None) else '')
self.ylabel = (ylabel if (ylabel is not None) else '')
self._determine_cmap_params(plot_data, vmin, vmax, cmap, center, robust)
self._determine_cellsize_params(plot_data, cellsize, cellsize_vmax)
if (annot is None):
annot = False
annot_data = None
elif isinstance(annot, bool):
if annot:
annot_data = plot_data
else:
annot_data = None
else:
try:
annot_data = annot.values
except AttributeError:
annot_data = annot
if (annot.shape != plot_data.shape):
raise ValueError('Data supplied to "annot" must be the same shape as the data to plot.')
annot = True
self.data = data
self.plot_data = plot_data
self.annot = annot
self.annot_data = annot_data
self.fmt = fmt
self.annot_kws = ({} if (annot_kws is None) else annot_kws)
self.annot_kws.setdefault('color', 'black')
self.annot_kws.setdefault('ha', 'center')
self.annot_kws.setdefault('va', 'center')
self.cbar = cbar
self.cbar_kws = ({} if (cbar_kws is None) else cbar_kws)
self.cbar_kws.setdefault('ticks', mpl.ticker.MaxNLocator(6))
self.ax_kws = ({} if (ax_kws is None) else ax_kws)
self.rect_kws = ({} if (rect_kws is None) else rect_kws)
def _determine_cmap_params(self, plot_data, vmin, vmax, cmap, center, robust):
'Use some heuristics to set good defaults for colorbar and range.'
calc_data = plot_data.data[(~ np.isnan(plot_data.data))]
if (vmin is None):
vmin = (np.percentile(calc_data, 2) if robust else calc_data.min())
if (vmax is None):
vmax = (np.percentile(calc_data, 98) if robust else calc_data.max())
(self.vmin, self.vmax) = (vmin, vmax)
if (cmap is None):
if (center is None):
self.cmap = cm.rocket
else:
self.cmap = cm.icefire
elif isinstance(cmap, string_types):
self.cmap = mpl.cm.get_cmap(cmap)
elif isinstance(cmap, list):
self.cmap = mpl.colors.ListedColormap(cmap)
else:
self.cmap = cmap
if (center is not None):
vrange = max((vmax - center), (center - vmin))
normalize = mpl.colors.Normalize((center - vrange), (center + vrange))
(cmin, cmax) = normalize([vmin, vmax])
cc = np.linspace(cmin, cmax, 256)
self.cmap = mpl.colors.ListedColormap(self.cmap(cc))
def _determine_cellsize_params(self, plot_data, cellsize, cellsize_vmax):
if (cellsize is None):
self.cellsize = np.ones(plot_data.shape)
self.cellsize_vmax = 1.0
else:
if isinstance(cellsize, pd.DataFrame):
cellsize = cellsize.values
self.cellsize = cellsize
if (cellsize_vmax is None):
cellsize_vmax = cellsize.max()
self.cellsize_vmax = cellsize_vmax
def _skip_ticks(self, labels, tickevery):
'Return ticks and labels at evenly spaced intervals.'
n = len(labels)
if (tickevery == 0):
(ticks, labels) = ([], [])
elif (tickevery == 1):
(ticks, labels) = ((np.arange(n) + 0.5), labels)
else:
(start, end, step) = (0, n, tickevery)
ticks = (np.arange(start, end, step) + 0.5)
labels = labels[start:end:step]
return (ticks, labels)
def _auto_ticks(self, ax, labels, axis):
'Determine ticks and ticklabels that minimize overlap.'
transform = ax.figure.dpi_scale_trans.inverted()
bbox = ax.get_window_extent().transformed(transform)
size = [bbox.width, bbox.height][axis]
axis = [ax.xaxis, ax.yaxis][axis]
(tick,) = axis.set_ticks([0])
fontsize = tick.label.get_size()
max_ticks = int((size // (fontsize / 72)))
if (max_ticks < 1):
return ([], [])
tick_every = ((len(labels) // max_ticks) + 1)
tick_every = (1 if (tick_every == 0) else tick_every)
(ticks, labels) = self._skip_ticks(labels, tick_every)
return (ticks, labels)
def plot(self, ax, cax):
'Draw the heatmap on the provided Axes.'
despine(ax=ax, left=True, bottom=True)
(height, width) = self.plot_data.shape
(xpos, ypos) = np.meshgrid((np.arange(width) + 0.5), (np.arange(height) + 0.5))
data = self.plot_data.data
cellsize = self.cellsize
mask = self.plot_data.mask
if ((not isinstance(mask, np.ndarray)) and (not mask)):
mask = np.zeros(self.plot_data.shape, np.bool)
annot_data = self.annot_data
if (not self.annot):
annot_data = np.zeros(self.plot_data.shape)
for (x, y, m, val, s, an_val) in zip(xpos.flat, ypos.flat, mask.flat, data.flat, cellsize.flat, annot_data.flat):
if (not m):
vv = ((val - self.vmin) / (self.vmax - self.vmin))
size = np.clip((s / self.cellsize_vmax), 0.1, 1.0)
color = self.cmap(vv)
rect = plt.Rectangle([(x - (size / 2)), (y - (size / 2))], size, size, facecolor=color, **self.rect_kws)
ax.add_patch(rect)
if self.annot:
annotation = (('{:' + self.fmt) + '}').format(an_val)
text = ax.text(x, y, annotation, **self.annot_kws)
text_luminance = relative_luminance(text.get_color())
text_edge_color = ('.15' if (text_luminance > 0.408) else 'w')
text.set_path_effects([mpl.patheffects.withStroke(linewidth=1, foreground=text_edge_color)])
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
ax.set(**self.ax_kws)
if self.cbar:
norm = mpl.colors.Normalize(vmin=self.vmin, vmax=self.vmax)
scalar_mappable = mpl.cm.ScalarMappable(cmap=self.cmap, norm=norm)
scalar_mappable.set_array(self.plot_data.data)
cb = ax.figure.colorbar(scalar_mappable, cax, ax, **self.cbar_kws)
cb.outline.set_linewidth(0)
if (isinstance(self.xticks, string_types) and (self.xticks == 'auto')):
(xticks, xticklabels) = self._auto_ticks(ax, self.xticklabels, 0)
else:
(xticks, xticklabels) = (self.xticks, self.xticklabels)
if (isinstance(self.yticks, string_types) and (self.yticks == 'auto')):
(yticks, yticklabels) = self._auto_ticks(ax, self.yticklabels, 1)
else:
(yticks, yticklabels) = (self.yticks, self.yticklabels)
ax.set(xticks=xticks, yticks=yticks)
xtl = ax.set_xticklabels(xticklabels)
ytl = ax.set_yticklabels(yticklabels, rotation='vertical')
ax.figure.draw(ax.figure.canvas.get_renderer())
if axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation='vertical')
if axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation='horizontal')
ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
ax.invert_yaxis()
|
def heatmap2(data, vmin=None, vmax=None, cmap=None, center=None, robust=False, annot=None, fmt='.2g', annot_kws=None, cellsize=None, cellsize_vmax=None, cbar=True, cbar_kws=None, cbar_ax=None, square=False, xticklabels='auto', yticklabels='auto', mask=None, ax=None, ax_kws=None, rect_kws=None):
plotter = _HeatMapper2(data, vmin, vmax, cmap, center, robust, annot, fmt, annot_kws, cellsize, cellsize_vmax, cbar, cbar_kws, xticklabels, yticklabels, mask, ax_kws, rect_kws)
if (ax is None):
ax = plt.gca()
if square:
ax.set_aspect('equal')
ax.grid(False)
plotter.plot(ax, cbar_ax)
return ax
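# Hedged usage sketch for heatmap2 (added for illustration; not part of the original
# source). Besides the usual seaborn-style arguments, cellsize scales the square drawn
# for each cell, so a second matrix (e.g. -log10 p-values) can be encoded as cell size
# while the colour encodes the value matrix.
def _example_heatmap2():
    values = pd.DataFrame(np.random.randn(5, 4))
    sizes = pd.DataFrame(np.random.rand(5, 4))
    return heatmap2(values, cellsize=sizes, cellsize_vmax=1.0, center=0, square=True)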
|
def plot_nx_dag(ss, save_fn=None):
reset_plot()
g = nx.DiGraph()
g.add_edges_from([(i, ss.nodes[i].child[j].id) for i in ss.nodes for j in range(len(ss.nodes[i].child))])
pos = graphviz_layout(g, prog='dot')
nx.draw_networkx_nodes(g, pos, cmap=plt.get_cmap('jet'), node_size=500)
nx.draw_networkx_labels(g, pos, font_size=7.5)
nx.draw_networkx_edges(g, pos, arrows=True)
if save_fn:
plt.savefig(save_fn)
else:
plt.show()
|
def plot_all_stats(file_list, baseline_file):
dfs = [read_csv(f) for f in file_list]
base_df = read_csv(baseline_file)
n_run = 20
find_min_dict = {'Knowledge': True, 'Loss': True, 'Accuracy': False}
for key in ['Knowledge', 'Accuracy', 'Loss']:
plt.close()
for (i, df) in enumerate(dfs):
stat = df[key]
name = file_list[i][:(- 4)].split('/')[(- 1)]
plt.plot(accum_opt(stat[:n_run], find_min_dict[key]), label=name)
plt.hlines(y=(sum(base_df[key]) / float(base_df.shape[0])), xmin=0, xmax=n_run, linestyle='dashed', label='baseline')
plt.legend(loc='best')
plt.xlabel('Number of trajectories', fontsize=16)
plt.ylabel(key, fontsize=16)
plt.savefig('./tmp/all_stats_{}.png'.format(key.lower()))
|
def get_random_data(num_samps=1000):
x = np.random.sample(((10 * 4) * num_samps)).reshape((num_samps, 10, 4))
y = np.random.sample(num_samps)
return (x, y)
|
def get_class_name(*args):
id = args[1]
map = {0: 'TestGeneralEnv', 1: 'TestEnasEnv'}
return map[id]
|
@parameterized_class(attrs=('foo', 'manager_getter', 'controller_getter', 'modeler_getter', 'trainenv_getter'), input_values=[(0, architect.GeneralManager, architect.GeneralController, modeler.KerasResidualCnnBuilder, architect.ControllerTrainEnvironment), (1, architect.EnasManager, architect.GeneralController, modeler.EnasCnnModelBuilder, architect.EnasTrainEnv)], class_name_func=get_class_name)
class TestEnvDryRun(testing_utils.TestCase):
'This dry-run test only aims to construct a train env class without examining its behaviors; however, it\n serves as the scaffold for other tests\n '
manager_getter = architect.GeneralManager
controller_getter = architect.GeneralController
modeler_getter = modeler.KerasResidualCnnBuilder
trainenv_getter = architect.ControllerTrainEnvironment
def __init__(self, *args, **kwargs):
super(TestEnvDryRun, self).__init__(*args, **kwargs)
self.train_data = get_random_data(50)
self.val_data = get_random_data(10)
(self.model_space, _) = testing_utils.get_example_conv1d_space(out_filters=8, num_layers=2)
self.reward_fn = architect.reward.LossReward()
self.store_fn = architect.store.get_store_fn('minimal')
def setUp(self):
self.tempdir = tempfile.TemporaryDirectory()
self.session = tf.Session()
self.controller = self.controller_getter(model_space=self.model_space, buffer_type='ordinal', with_skip_connection=True, kl_threshold=0.05, buffer_size=1, batch_size=3, session=self.session, train_pi_iter=10, lstm_size=16, lstm_num_layers=1, optim_algo='adam', skip_target=0.8, skip_weight=0.4)
self.model_fn = self.modeler_getter(model_space=self.model_space, inputs_op=architect.Operation('input', shape=(10, 4)), output_op=architect.Operation('dense', units=1, activation='sigmoid'), fc_units=5, flatten_mode='gap', model_compile_dict={'optimizer': 'adam', 'loss': 'mse'}, batch_size=10, session=self.session, controller=self.controller, verbose=0)
self.manager = self.manager_getter(train_data=self.train_data, validation_data=self.val_data, model_fn=self.model_fn, reward_fn=self.reward_fn, store_fn=self.store_fn, working_dir=self.tempdir.name, child_batchsize=10, epochs=1, verbose=0)
self.env = self.trainenv_getter(self.controller, self.manager, max_episode=20, max_step_per_ep=1, logger=None, resume_prev_run=False, should_plot=True, working_dir=self.tempdir.name, with_skip_connection=True)
def tearDown(self):
super(TestEnvDryRun, self).tearDown()
self.tempdir.cleanup()
def test_build(self):
self.env.train()
self.assertTrue(os.path.isfile(os.path.join(self.tempdir.name, 'controller_weights.h5')))
self.assertTrue(os.path.isfile(os.path.join(self.tempdir.name, 'train_history.csv')))
|
class TestBuffer(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBuffer, self).__init__(*args, **kwargs)
self.buffer_getter = architect.buffer.get_buffer('ordinal')
def setUp(self):
super(TestBuffer, self).setUp()
self.tempdir = tempfile.TemporaryDirectory()
self.buffer = self.buffer_getter(max_size=4, is_squeeze_dim=True)
(self.state_space, _) = testing_utils.get_example_conv1d_space(num_layers=2)
self.num_ops_per_layer = len(self.state_space[0])
for _ in range(8):
(state, proba, act) = self._get_data()
self.buffer.store(state=state, prob=proba, action=act, reward=1)
def tearDown(self):
self.tempdir.cleanup()
super(TestBuffer, self).tearDown()
def _get_data(self):
state = np.random.sample(4).reshape((1, 1, 4))
proba = [np.random.sample(self.num_ops_per_layer).reshape((1, self.num_ops_per_layer)), np.random.sample(self.num_ops_per_layer).reshape((1, self.num_ops_per_layer)), np.random.sample(2).reshape((1, 1, 2))]
act = np.random.choice(2, 3).astype('int')
return (state, proba, act)
def test_finish_path(self):
self.buffer.finish_path(state_space=self.state_space, global_ep=0, working_dir=self.tempdir.name)
self.assertNotEqual(len(self.buffer.lt_abuffer), 0)
self.assertNotEqual(len(self.buffer.lt_pbuffer), 0)
self.assertNotEqual(len(self.buffer.lt_adbuffer), 0)
def test_get(self):
self.buffer.finish_path(state_space=self.state_space, global_ep=0, working_dir=self.tempdir.name)
cnt = 0
for data in self.buffer.get_data(bs=2):
cnt += 1
(_, probas, acts, ads, rewards) = data
self.assertEqual(len(ads), 2)
for pr in probas:
self.assertEqual(len(pr), 2)
self.assertEqual(len(acts), 2)
self.assertEqual(len(rewards), 2)
self.assertEqual(type(probas), list)
self.assertEqual(cnt, 4)
|
class TestReplayBuffer(TestBuffer):
def __init__(self, *args, **kwargs):
super(TestReplayBuffer, self).__init__(*args, **kwargs)
self.buffer_getter = architect.buffer.get_buffer('replay')
|
class TestMultiManagerBuffer(TestBuffer):
def __init__(self, *args, **kwargs):
super(TestMultiManagerBuffer, self).__init__(*args, **kwargs)
self.buffer_getter = architect.buffer.get_buffer('multimanager')
def setUp(self):
super(TestBuffer, self).setUp()
self.tempdir = tempfile.TemporaryDirectory()
self.buffer = self.buffer_getter(max_size=4, is_squeeze_dim=True)
(self.state_space, _) = testing_utils.get_example_conv1d_space(num_layers=2)
self.num_ops_per_layer = len(self.state_space[0])
for manager_index in range(4):
for i in range(8):
(_, proba, act) = self._get_data()
self.buffer.store(prob=proba, action=act, reward=manager_index, manager_index=manager_index, description=np.array([manager_index, (manager_index ** 2)]).reshape((1, 2)))
def test_get(self):
self.buffer.finish_path(state_space=self.state_space, global_ep=0, working_dir=self.tempdir.name)
cnt = 0
for data in self.buffer.get_data(bs=2):
cnt += 1
self.assertEqual(type(data), dict)
probas = data['prob']
acts = data['action']
ads = data['advantage']
rewards = data['reward']
desc = data['description']
self.assertEqual(desc[0][0], rewards[0][0])
self.assertEqual(desc[1][0], rewards[1][0])
self.assertEqual(desc[0][1], (rewards[0][0] ** 2))
self.assertEqual(desc[1][1], (rewards[1][0] ** 2))
self.assertEqual(len(ads), 2)
self.assertEqual(type(probas), list)
for pr in probas:
self.assertEqual(len(pr), 2)
self.assertEqual(len(acts), 2)
self.assertEqual(len(rewards), 2)
self.assertEqual(len(desc), 2)
self.assertEqual(cnt, 16)
|
class TestReward(unittest.TestCase):
data = (None, np.array([0, 0, 0, 1, 1, 1]))
model = testing_utils.PseudoModel(pred_retr=np.array([(- 1), (- 1), (- 1), 1, 1, 1]), eval_retr={'val_loss': 0.5})
knowledge_fn = testing_utils.PseudoKnowledge(k_val=0.2)
|
class TestAucReward(TestReward):
def __init__(self, *args, **kwargs):
super(TestAucReward, self).__init__(*args, **kwargs)
self.reward_getter = architect.reward.LossAucReward
@parameterized.expand([('auc', 1), ('aupr', 1), ((lambda y_true, y_score: scipy.stats.spearmanr(y_true, y_score)[0]), 1), ((lambda y_true, y_score: scipy.stats.pearsonr(y_true, y_score)[0]), 1)])
def test_methods_call(self, method, expect_reward):
reward_fn = self.reward_getter(method=method)
(reward, loss_and_metrics, reward_metrics) = reward_fn(model=self.model, data=self.data)
self.assertEqual(reward, expect_reward)
self.assertTrue(hasattr(reward_fn, 'knowledge_function'))
|
class TestLossReward(TestReward):
def __init__(self, *args, **kwargs):
super(TestLossReward, self).__init__(*args, **kwargs)
self.reward_getter = architect.reward.LossReward
def test_methods_call(self):
reward_fn = self.reward_getter()
(reward, loss_and_metrics, reward_metrics) = reward_fn(model=self.model, data=self.data)
self.assertEqual(reward, (- 0.5))
self.assertTrue(hasattr(reward_fn, 'knowledge_function'))
|
@parameterized_class(('eval_retr', 'k_val', 'Lambda', 'exp_reward'), [(0.5, 0.2, 1, (- 0.7)), (0.5, 0.2, 2, (- 0.9)), (0.5, 0.2, 0, (- 0.5))])
class TestKnowledgeReward(TestReward):
def __init__(self, *args, **kwargs):
super(TestKnowledgeReward, self).__init__(*args, **kwargs)
self.reward_getter = architect.reward.KnowledgeReward
self.model.eval_retr = self.eval_retr
self.knowledge_fn.k_val = self.k_val
def test_methods_call(self):
reward_fn = self.reward_getter(knowledge_function=self.knowledge_fn, Lambda=self.Lambda)
(reward, loss_and_metrics, reward_metrics) = reward_fn(model=self.model, data=self.data)
self.assertEqual(reward, self.exp_reward)
self.assertTrue(hasattr(reward_fn, 'knowledge_function'))
|
class TestManager(testing_utils.TestCase):
x = np.random.sample(((10 * 4) * 1000)).reshape((1000, 10, 4))
y = np.random.sample(1000)
def __init__(self, *args, **kwargs):
super(TestManager, self).__init__(*args, **kwargs)
self.reward_fn = testing_utils.PseudoReward()
self.model_fn = testing_utils.PseudoConv1dModelBuilder(input_shape=(10, 4), output_units=1)
self.store_fn = testing_utils.PseudoCaller()
def setUp(self):
self.tempdir = tempfile.TemporaryDirectory()
@parameterized.expand([(1, architect.GeneralManager), ((- 1), architect.GeneralManager), (1, architect.EnasManager), ((- 1), architect.EnasManager)])
def test_get_reward(self, exp_reward, manager_getter):
self.reward_fn.retr_val = exp_reward
self.manager = manager_getter(train_data=(self.x, self.y), validation_data=(self.x, self.y), model_fn=self.model_fn, reward_fn=self.reward_fn, store_fn=self.store_fn, working_dir=self.tempdir.name, epochs=1, verbose=0)
(reward, loss_and_metrics) = self.manager.get_rewards(trial=0, model_arc=[0, 0, 0])
self.assertEqual(reward, exp_reward)
def tearDown(self):
self.tempdir.cleanup()
super(TestManager, self).tearDown()
|
class TestStore(testing_utils.TestCase):
x = np.random.sample(((10 * 4) * 1000)).reshape((1000, 10, 4))
y = np.random.sample(1000)
def setUp(self):
self.tempdir = tempfile.TemporaryDirectory()
self.trial = 0
self.model = testing_utils.PseudoConv1dModelBuilder(input_shape=(10, 4), output_units=1)()
model_checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(self.tempdir.name, 'temp_network.h5'), monitor='loss', save_best_only=True)
self.history = self.model.fit(self.x, self.y, batch_size=100, epochs=5, verbose=0, callbacks=[model_checkpointer])
self.pred = self.model.predict(self.x, verbose=0)
eval_retr = self.model.evaluate(self.x, self.y, verbose=0)
self.loss_and_metrics = {'loss': eval_retr}
@parameterized.expand([('general', ('weights/trial_0/bestmodel.h5', 'weights/trial_0/pred.txt')), ('minimal', ('weights/trial_0/bestmodel.h5',)), ('model_plot', ('weights/trial_0/bestmodel.h5', 'weights/trial_0/pred.txt', 'weights/trial_0/model_arc.png'))])
def test_store_fn(self, store_name, files):
store_fn = architect.store.get_store_fn(store_name)
store_fn(trial=self.trial, model=self.model, hist=self.history, data=(self.x, self.y), pred=self.pred, loss_and_metrics=self.loss_and_metrics, working_dir=self.tempdir.name)
for f in files:
self.assertTrue(os.path.isfile(os.path.join(self.tempdir.name, f)))
def tearDown(self):
super(TestStore, self).tearDown()
self.tempdir.cleanup()
|
class TestModelSpace(testing_utils.TestCase):
def test_conv1d_model_space(self):
model_space = architect.ModelSpace()
num_layers = 2
out_filters = 8
layer_ops = [architect.Operation('conv1d', filters=out_filters, kernel_size=8, activation='relu'), architect.Operation('conv1d', filters=out_filters, kernel_size=4, activation='relu'), architect.Operation('maxpool1d', filters=out_filters, pool_size=4, strides=1), architect.Operation('avgpool1d', filters=out_filters, pool_size=4, strides=1)]
for i in range(num_layers):
model_space.add_layer(i, copy.copy(layer_ops))
self.assertLen(model_space, num_layers)
self.assertLen(model_space[0], 4)
model_space.add_layer(2, copy.copy(layer_ops))
self.assertLen(model_space, (num_layers + 1))
model_space.add_state(2, architect.Operation('identity', filters=out_filters))
self.assertLen(model_space[2], 5)
model_space.delete_state(2, 4)
self.assertLen(model_space[2], 4)
model_space.delete_layer(2)
self.assertLen(model_space, num_layers)
|
class TestGeneralController(testing_utils.TestCase):
def setUp(self):
super(TestGeneralController, self).setUp()
self.session = tf.Session()
(self.model_space, _) = testing_utils.get_example_conv1d_space()
self.controller = architect.GeneralController(model_space=self.model_space, buffer_type='ordinal', with_skip_connection=True, kl_threshold=0.05, buffer_size=15, batch_size=5, session=self.session, train_pi_iter=2, lstm_size=32, lstm_num_layers=1, lstm_keep_prob=1.0, optim_algo='adam', skip_target=0.8, skip_weight=0.4)
def test_get_architecture(self):
(act, prob) = self.controller.get_action()
self.assertIsInstance(act, np.ndarray)
self.assertIsInstance(prob, list)
i = 0
for layer_id in range(len(self.model_space)):
pr = prob[i]
self.assertAllClose(pr.flatten(), ([(1.0 / len(pr.flatten()))] * len(pr.flatten())), atol=0.05)
if (layer_id > 0):
pr = prob[(i + 1)]
self.assertAllClose(pr.flatten(), ([0.5] * len(pr.flatten())), atol=0.05)
i += 1
i += 1
def test_optimize(self):
(act, prob) = self.controller.get_action()
feed_dict = {self.controller.input_arc[i]: [[act[i]]] for i in range(len(act))}
feed_dict.update({self.controller.advantage: [[1]]})
feed_dict.update({self.controller.old_probs[i]: prob[i] for i in range(len(self.controller.old_probs))})
feed_dict.update({self.controller.reward: [[1]]})
for _ in range(100):
self.session.run(self.controller.train_op, feed_dict=feed_dict)
(act2, prob2) = self.controller.get_action()
self.assertAllEqual(act, act2)
|
class TestOperationController(testing_utils.TestCase):
def setUp(self):
super(TestOperationController, self).setUp()
(self.model_space, _) = testing_utils.get_example_conv1d_space()
self.controller = architect.OperationController(state_space=self.model_space, controller_units=8, kl_threshold=0.05, buffer_size=15, batch_size=5, train_pi_iter=2)
self.tempdir = tempfile.TemporaryDirectory()
def test_optimize(self):
seed = np.array(([0] * 3)).reshape((1, 1, 3))
for _ in range(8):
(act, proba) = self.controller.get_action(seed)
self.controller.store(state=seed, prob=proba, action=act, reward=_)
self.controller.train(episode=0, working_dir=self.tempdir.name)
def tearDown(self):
super(TestOperationController, self).tearDown()
self.tempdir.cleanup()
|
def get_controller(state_space, sess):
'Test function for building a controller network. A controller is an LSTM cell that predicts the next\n layer given the previous layer and all previous layers (as stored in the hidden cell states). The\n controller model is trained by policy gradients as in reinforcement learning.\n '
with tf.device('/cpu:0'):
controller = GeneralController(model_space=state_space, lstm_size=16, lstm_num_layers=1, with_skip_connection=False, kl_threshold=0.05, train_pi_iter=50, optim_algo='adam', buffer_size=5, batch_size=5, session=sess, use_ppo_loss=False, verbose=0)
return controller
|
def get_mock_manager(history_fn_list, Lambda=1.0, wd='./tmp_mock'):
'Test function for building a mock manager. A mock manager\n returns a loss and knowledge instantly based on previous\n training history.\n '
manager = MockManager(history_fn_list=history_fn_list, model_compile_dict={'loss': 'binary_crossentropy', 'optimizer': 'adam', 'metrics': ['acc']}, working_dir=wd, Lambda=Lambda, verbose=0)
return manager
|
def get_environment(controller, manager, should_plot=True, logger=None, wd='./tmp_mock/'):
'Test function for getting a training environment for the controller; a usage sketch chaining the three helpers follows this function.\n '
env = ControllerTrainEnvironment(controller, manager, max_episode=100, max_step_per_ep=3, logger=logger, resume_prev_run=False, should_plot=should_plot, working_dir=wd, with_skip_connection=False, with_input_blocks=False)
return env
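# Hedged usage sketch (illustrative only, not part of the test suite): how the three
# helpers above are typically chained together. `get_state_space` and the mock history
# files are assumptions borrowed from TestBootstrap below.
def _example_mock_search(history_fn_list, working_dir):
    import tensorflow as tf
    sess = tf.Session()
    state_space = get_state_space()  # assumed helper, as used in TestBootstrap
    controller = get_controller(state_space, sess)
    manager = get_mock_manager(history_fn_list, Lambda=1.0, wd=working_dir)
    env = get_environment(controller, manager, should_plot=False, wd=working_dir)
    env.train()  # writes train_history.csv and controller_weights.h5 to working_dir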
|
class TestBootstrap(testing_utils.TestCase):
Lambda = 1
def __init__(self, *args, **kwargs):
super(TestBootstrap, self).__init__(*args, **kwargs)
self.hist_file_list = [os.path.join(os.path.dirname(__file__), ('mock_black_box/tmp_%i/train_history.csv.gz' % i)) for i in range(1, 21)]
self.state_space = get_state_space()
def setUp(self):
self.tempdir = tempfile.TemporaryDirectory()
self.session = tf.Session()
self.manager = get_mock_manager(self.hist_file_list, Lambda=self.Lambda, wd=self.tempdir.name)
self.controller = get_controller(self.state_space, self.session)
self.env = get_environment(self.controller, self.manager, wd=self.tempdir.name)
def _sample_rewards(self):
rewards = []
for _ in range(10):
arc = self.controller.get_action()[0]
arc = [self.controller.model_space[layer_id][i] for (layer_id, i) in enumerate(arc)]
res = self.manager.get_rewards((- 1), model_arc=arc)[0]
rewards.append(res)
return rewards
def test_run(self):
old_rewards = self._sample_rewards()
self.env.train()
self.assertTrue(os.path.isfile(os.path.join(self.tempdir.name, 'train_history.csv')))
self.assertTrue(os.path.isfile(os.path.join(self.tempdir.name, 'controller_weights.h5')))
new_rewards = self._sample_rewards()
self.assertLess(np.mean(old_rewards), np.mean(new_rewards))
def tearDown(self):
self.tempdir.cleanup()
super(TestBootstrap, self).tearDown()
|
class TestEnasConvModeler(testing_utils.TestCase):
def setUp(self):
self.session = tf.Session()
self.input_op = [architect.Operation('input', shape=(10, 4), name='input')]
self.output_op = architect.Operation('dense', units=1, activation='sigmoid', name='output')
self.x = np.random.choice(2, 40).reshape((1, 10, 4))
self.y = np.random.sample(1).reshape((1, 1))
(self.model_space, _) = testing_utils.get_example_conv1d_space()
self.model_compile_dict = {'loss': 'binary_crossentropy', 'optimizer': 'sgd'}
self.controller = architect.GeneralController(model_space=self.model_space, buffer_type='ordinal', with_skip_connection=True, kl_threshold=0.05, buffer_size=15, batch_size=5, session=self.session, train_pi_iter=2, lstm_size=32, lstm_num_layers=1, lstm_keep_prob=1.0, optim_algo='adam', skip_target=0.8, skip_weight=0.4)
self.target_arc = [0, 0, 1]
self.enas_modeler = modeler.EnasCnnModelBuilder(model_space=self.model_space, num_layers=len(self.model_space), inputs_op=self.input_op, output_op=self.output_op, model_compile_dict=self.model_compile_dict, session=self.session, controller=self.controller, batch_size=1, dag_kwargs={'stem_config': {'has_stem_conv': True, 'fc_units': 5}})
self.num_samps = 15
def test_sample_arc_builder(self):
model = self.enas_modeler()
samp_preds = [model.predict(self.x).flatten()[0] for _ in range(self.num_samps)]
self.assertNotEqual(len(set(samp_preds)), 1)
old_loss = [model.evaluate(self.x, self.y)['val_loss'] for _ in range(self.num_samps)]
model.fit(self.x, self.y, batch_size=1, epochs=100, verbose=0)
new_loss = [model.evaluate(self.x, self.y)['val_loss'] for _ in range(self.num_samps)]
self.assertLess(sum(new_loss), sum(old_loss))
def test_fix_arc_builder(self):
model = self.enas_modeler(arc_seq=self.target_arc)
fix_preds = [model.predict(self.x).flatten()[0] for _ in range(self.num_samps)]
self.assertEqual(len(set(fix_preds)), 1)
old_loss = model.evaluate(self.x, self.y)['val_loss']
model2 = self.enas_modeler()
model2.fit(self.x, self.y, batch_size=1, epochs=100, verbose=0)
new_loss = model.evaluate(self.x, self.y)['val_loss']
self.assertLess(new_loss, old_loss)
fix_preds = [model.predict(self.x).flatten()[0] for _ in range(self.num_samps)]
self.assertEqual(len(set(fix_preds)), 1)
|
class TestKerasBuilder(testing_utils.TestCase):
def setUp(self):
(self.model_space, _) = testing_utils.get_example_conv1d_space(num_layers=2)
self.target_arc = [0, 0, 1]
self.input_op = architect.Operation('input', shape=(10, 4), name='input')
self.output_op = architect.Operation('dense', units=1, activation='sigmoid', name='output')
self.model_compile_dict = {'loss': 'binary_crossentropy', 'optimizer': 'sgd'}
self.x = np.random.choice(2, 40).reshape((1, 10, 4))
self.y = np.random.sample(1).reshape((1, 1))
self.modeler = modeler.KerasResidualCnnBuilder(inputs_op=self.input_op, output_op=self.output_op, model_space=self.model_space, fc_units=5, flatten_mode='flatten', model_compile_dict=self.model_compile_dict)
def test_get_model(self):
model = self.modeler(self.target_arc)
old_loss = model.evaluate(self.x, self.y)
model.fit(self.x, self.y, batch_size=1, epochs=100, verbose=0)
new_loss = model.evaluate(self.x, self.y)
self.assertLess(new_loss, old_loss)
|
class TestKerasGetLayer(testing_utils.TestCase):
@parameterized.expand([((100,), 'dense', {'units': 4}), ((100,), 'identity', {}), ((100, 4), 'conv1d', {'filters': 5, 'kernel_size': 8}), ((100, 4), 'maxpool1d', {'pool_size': 4, 'strides': 4}), ((100, 4), 'avgpool1d', {'pool_size': 4, 'strides': 4}), ((100, 4), 'lstm', {'units': 2}), ((100, 4), 'flatten', {}), ((100, 4), 'globalavgpool1d', {}), ((100, 4), 'globalmaxpool1d', {}), ((100,), 'dropout', {'rate': 0.3}), ((100, 4), 'dropout', {'rate': 0.3}), ((100,), 'sparsek_vec', {}), ((100,), 'gaussian_noise', {'stddev': 1})])
def test_get_layers(self, input_shape, layer_name, layer_attr):
x = modeler.dag.get_layer(x=None, state=architect.Operation('Input', shape=input_shape))
operation = architect.Operation(layer_name, **layer_attr)
layer = modeler.dag.get_layer(x=x, state=operation)
self.assertIsInstance(layer, tf.Tensor)
|
def run_from_ipython():
try:
__IPYTHON__
return True
except NameError:
return False
|
def get_available_gpus():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if (x.device_type == 'GPU')]
|
def setup_logger(working_dir='.', verbose_level=logging.INFO):
'The logger used throughout the training environment\n\n Parameters\n ----------\n working_dir : str\n File path to the working directory; the log file ``log.AMBER.txt`` is stored there.\n\n verbose_level : int\n Verbosity level; can be specified as in ``logging``\n\n Returns\n -------\n logger : logging.Logger\n The configured logging object\n '
logger = logging.getLogger('AMBER')
logger.setLevel(verbose_level)
fh = logging.FileHandler(os.path.join(working_dir, 'log.AMBER.txt'))
fh.setLevel(verbose_level)
ch = logging.StreamHandler()
ch.setLevel(verbose_level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return logger
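# Hedged usage sketch: wiring the AMBER logger into a run directory. The working
# directory path and verbosity level here are illustrative assumptions.
def _example_setup_logger():
    logger = setup_logger(working_dir='.', verbose_level=logging.DEBUG)
    logger.info('AMBER run started')  # written to both log.AMBER.txt and the console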
|
def get_session(gpu_fraction=0.75):
    'Create a tf.Session that allocates only ``gpu_fraction`` of the available GPU memory per process'
    # read the intra-op thread count from the environment; fall back to default threading if unset
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    if num_threads:
        num_threads = int(num_threads)
        print(('with nthreads=%s' % num_threads))
        return tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
    else:
        return tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options))
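# Hedged usage sketch: register the fractional-memory session as the Keras backend
# session before building any models. The 0.25 fraction is an illustrative value.
def _example_register_session():
    session = get_session(gpu_fraction=0.25)
    K.set_session(session)
    return session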
|
def get_session2(CPU, GPU):
    # read the thread count from the environment, defaulting to 1
    num_threads = int(os.environ.get('OMP_NUM_THREADS', 1))
    # default to a single CPU and no GPU when neither flag is set
    (num_CPU, num_GPU) = (1, 0)
    if GPU:
        num_GPU = 1
        num_CPU = 1
    if CPU:
        num_CPU = 1
        num_GPU = 0
    config = tf.ConfigProto(intra_op_parallelism_threads=num_threads, inter_op_parallelism_threads=num_threads, allow_soft_placement=True, device_count={'CPU': num_CPU, 'GPU': num_GPU})
    session = tf.Session(config=config)
    K.set_session(session)
|
class TestCase(tf.test.TestCase):
def tearDown(self):
tf.keras.backend.clear_session()
keras.backend.clear_session()
super(TestCase, self).tearDown()
|
class PseudoModel():
def __init__(self, pred_retr, eval_retr):
self.pred_retr = pred_retr
self.eval_retr = eval_retr
def predict(self, *args, **kwargs):
return self.pred_retr
def evaluate(self, *args, **kwargs):
return self.eval_retr
def fit(self, *args, **kwargs):
pass
def load_weights(self, *args, **kwargs):
pass
|
class PseudoKnowledge():
def __init__(self, k_val):
self.k_val = k_val
def __call__(self, *args, **kwargs):
return self.k_val
|
class PseudoConv1dModelBuilder():
def __init__(self, input_shape, output_units, model_compile_dict=None):
self.input_shape = input_shape
self.output_units = output_units
self.model_compile_dict = (model_compile_dict or {'optimizer': 'sgd', 'loss': 'mse'})
self.session = None
def __call__(self, *args, **kwargs):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv1D(filters=4, kernel_size=1, input_shape=self.input_shape))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=self.output_units))
model.compile(**self.model_compile_dict)
return model
|
class PseudoCaller():
def __init__(self, retr_val=0):
self.retr_val = retr_val
def __call__(self, *args, **kwargs):
return self.retr_val
|
class PseudoReward(PseudoCaller):
def __init__(self, retr_val=0):
self.retr_val = retr_val
self.knowledge_function = None
def __call__(self, *args, **kwargs):
return (self.retr_val, [self.retr_val], None)
|
def get_example_conv1d_space(out_filters=8, num_layers=2):
model_space = architect.ModelSpace()
num_pool = 1
expand_layers = [((num_layers // k) - 1) for k in range(1, num_pool)]
layer_sharing = {}
for i in range(num_layers):
model_space.add_layer(i, [architect.Operation('conv1d', filters=out_filters, kernel_size=8, activation='relu'), architect.Operation('maxpool1d', filters=out_filters, pool_size=4, strides=1), architect.Operation('identity', filters=out_filters)])
if (i in expand_layers):
out_filters *= 2
if (i > 0):
layer_sharing[i] = 0
return (model_space, layer_sharing)
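# Hedged usage sketch: the returned ModelSpace has `num_layers` layers with three
# candidate operations each, and layer_sharing maps every layer after the first back
# to layer 0.
def _example_model_space():
    (model_space, layer_sharing) = get_example_conv1d_space(out_filters=8, num_layers=3)
    assert len(model_space) == 3
    assert len(model_space[0]) == 3
    assert layer_sharing == {1: 0, 2: 0}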
|
class TestEncodedGenome(unittest.TestCase):
def setUp(self):
self.bases = ['A', 'C', 'G', 'T']
self.bases_to_arr = dict(A=numpy.array([1, 0, 0, 0]), C=numpy.array([0, 1, 0, 0]), G=numpy.array([0, 0, 1, 0]), T=numpy.array([0, 0, 0, 1]), N=numpy.array([0.25, 0.25, 0.25, 0.25]))
self.chrom_to_lens = {'seq0': 10, 'seq1': 25, 'seq2': 24}
self.in_memory = False
def _get_small_genome(self):
return EncodedGenome('amber/utils/tests/files/small_genome.fa', in_memory=self.in_memory)
def test_load_mixed_case_sequence(self):
expected = [[1, 0, 0, 0], [1, 0, 0, 0], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 0]]
expected = numpy.array(expected)
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq0', 0, 10)
self.assertSequenceEqual(expected.tolist(), observed.tolist())
def test_load_sequence0(self):
expected = [[0, 1, 0, 0]]
expected = numpy.array(expected)
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq0', 5, 6)
self.assertSequenceEqual(expected.tolist(), observed.tolist())
def test_load_sequence1(self):
expected = [[0, 0, 0, 1], [0, 0, 1, 0]]
expected = numpy.array(expected)
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq2', 22, 24)
self.assertSequenceEqual(expected.tolist(), observed.tolist())
def test_load_rc(self):
# reverse complement of seq0[0:10] is 'CCAAGGNNTT'; expected below is its one-hot encoding
expected = [[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0, 0, 0, 1], [0, 0, 0, 1]]
expected = numpy.array(expected)
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq0', 0, 10, '-')
self.assertSequenceEqual(expected.tolist(), observed.tolist())
def test_coords_flipped(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 2, 1)
self.assertFalse(observed)
def test_end_coord_too_large(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 9, 11)
self.assertFalse(observed)
def test_both_coords_too_large(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 11, 14)
self.assertFalse(observed)
def test_coords_too_large_negative(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', (- 100), (- 99))
self.assertFalse(observed)
def test_coords_negative_flipped(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', (- 1), (- 2))
self.assertFalse(observed)
def test_coords_negative(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', (- 2), (- 1))
self.assertFalse(observed)
def test_bad_chrom(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq-1', 1, 2)
self.assertFalse(observed)
def test_too_small(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 1, 1)
self.assertFalse(observed)
def test_first_coord(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 0, 1)
self.assertTrue(observed)
def test_last_coord(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 9, 10)
self.assertTrue(observed)
def test_in_memory(self):
self.assertFalse(self.in_memory)
def test_genome_in_memory(self):
g = self._get_small_genome()
self.assertFalse(g.in_memory)
|
class TestEncodedGenomeInMemory(TestEncodedGenome):
def setUp(self):
super(TestEncodedGenomeInMemory, self).setUp()
self.in_memory = True
def test_in_memory(self):
self.assertTrue(self.in_memory)
def test_genome_in_memory(self):
g = self._get_small_genome()
self.assertTrue(g.in_memory)
|
class TestEncodedHDF5Genome(unittest.TestCase):
def setUp(self):
self.bases = ['A', 'C', 'G', 'T']
self.bases_to_arr = dict(A=numpy.array([1, 0, 0, 0]), C=numpy.array([0, 1, 0, 0]), G=numpy.array([0, 0, 1, 0]), T=numpy.array([0, 0, 0, 1]), N=numpy.array([0.25, 0.25, 0.25, 0.25]))
self.chrom_to_lens = {'seq0': 10, 'seq1': 25, 'seq2': 24}
self.in_memory = False
def _get_small_genome(self):
return EncodedHDF5Genome('amber/utils/tests/files/small_genome.encoded.h5', in_memory=self.in_memory)
def test_load_mixed_case_sequence(self):
expected = [[1, 0, 0, 0], [1, 0, 0, 0], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 0]]
expected = numpy.array(expected)
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq0', 0, 10)
self.assertSequenceEqual(expected.tolist(), observed.tolist())
def test_load_sequence0(self):
expected = [[0, 1, 0, 0]]
expected = numpy.array(expected)
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq0', 5, 6)
self.assertSequenceEqual(expected.tolist(), observed.tolist())
def test_load_sequence1(self):
expected = [[0, 0, 0, 1], [0, 0, 1, 0]]
expected = numpy.array(expected)
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq2', 22, 24)
self.assertSequenceEqual(expected.tolist(), observed.tolist())
def test_load_rc(self):
# reverse complement of seq0[0:10] is 'CCAAGGNNTT'; expected below is its one-hot encoding
expected = [[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0, 0, 0, 1], [0, 0, 0, 1]]
expected = numpy.array(expected)
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq0', 0, 10, '-')
self.assertSequenceEqual(expected.tolist(), observed.tolist())
def test_coords_flipped(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 2, 1)
self.assertFalse(observed)
def test_end_coord_too_large(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 9, 11)
self.assertFalse(observed)
def test_both_coords_too_large(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 11, 14)
self.assertFalse(observed)
def test_coords_too_large_negative(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', (- 100), (- 99))
self.assertFalse(observed)
def test_coords_negative_flipped(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', (- 1), (- 2))
self.assertFalse(observed)
def test_coords_negative(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', (- 2), (- 1))
self.assertFalse(observed)
def test_bad_chrom(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq-1', 1, 2)
self.assertFalse(observed)
def test_too_small(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 1, 1)
self.assertFalse(observed)
def test_first_coord(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 0, 1)
self.assertTrue(observed)
def test_last_coord(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 9, 10)
self.assertTrue(observed)
def test_in_memory(self):
self.assertFalse(self.in_memory)
def test_genome_in_memory(self):
g = self._get_small_genome()
self.assertFalse(g.in_memory)
|
class TestEncodedHDF5GenomeInMemory(TestEncodedHDF5Genome):
def setUp(self):
super(TestEncodedHDF5GenomeInMemory, self).setUp()
self.in_memory = True
def test_in_memory(self):
self.assertTrue(self.in_memory)
def test_genome_in_memory(self):
g = self._get_small_genome()
self.assertTrue(g.in_memory)
|
class TestGenome(unittest.TestCase):
def setUp(self):
self.bases = ['A', 'C', 'G', 'T']
self.bases_to_arr = dict(A=numpy.array([1, 0, 0, 0]), C=numpy.array([0, 1, 0, 0]), G=numpy.array([0, 0, 1, 0]), T=numpy.array([0, 0, 0, 1]), N=numpy.array([0.25, 0.25, 0.25, 0.25]))
self.chrom_to_lens = {'seq0': 10, 'seq1': 25, 'seq2': 24}
self.in_memory = False
def _get_small_genome(self):
return Genome('amber/utils/tests/files/small_genome.fa', in_memory=self.in_memory)
def test_load_mixed_case_sequence(self):
expected = 'AANNCCTTGG'
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq0', 0, 10)
self.assertEqual(expected, observed)
def test_load_rc(self):
expected = 'CCAAGGNNTT'
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq0', 0, 10, '-')
self.assertEqual(expected, observed)
def test_load_sequence0(self):
expected = 'C'
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq1', 5, 6)
self.assertEqual(expected, observed)
def test_load_sequence1(self):
expected = 'A'
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq2', 0, 1)
self.assertEqual(expected, observed)
def test_load_sequence2(self):
expected = ((((('A' * 5) + ('C' * 5)) + ('T' * 5)) + ('G' * 5)) + ('N' * 5))
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq1', 0, 25)
self.assertEqual(expected, observed)
def test_load_sequence3(self):
expected = ('ACTG' * 6)
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq2', 0, 24)
self.assertEqual(expected, observed)
def test_load_sequence4(self):
expected = ((('T' * 5) + ('G' * 5)) + ('N' * 5))
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq1', 10, 25)
self.assertEqual(expected, observed)
def test_length(self):
expected = sum(self.chrom_to_lens.values())
g = self._get_small_genome()
observed = len(g)
self.assertEqual(expected, observed)
def test_coords_flipped(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 2, 1)
self.assertFalse(observed)
def test_end_coord_too_large(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 9, 11)
self.assertFalse(observed)
def test_both_coords_too_large(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 11, 14)
self.assertFalse(observed)
def test_coords_too_large_negative(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', (- 100), (- 99))
self.assertFalse(observed)
def test_coords_negative_flipped(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', (- 1), (- 2))
self.assertFalse(observed)
def test_coords_negative(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', (- 2), (- 1))
self.assertFalse(observed)
def test_bad_chrom(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq-1', 1, 2)
self.assertFalse(observed)
def test_too_small(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 1, 1)
self.assertFalse(observed)
def test_first_coord(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 0, 1)
self.assertTrue(observed)
def test_last_coord(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 9, 10)
self.assertTrue(observed)
def test_in_memory(self):
self.assertFalse(self.in_memory)
def test_genome_in_memory(self):
g = self._get_small_genome()
self.assertFalse(g.in_memory)
|
class TestGenomeInMemory(TestGenome):
def setUp(self):
super(TestGenomeInMemory, self).setUp()
self.in_memory = True
def test_in_memory(self):
self.assertTrue(self.in_memory)
def test_genome_in_memory(self):
g = self._get_small_genome()
self.assertTrue(g.in_memory)
|
class TestHDF5Genome(unittest.TestCase):
def setUp(self):
self.bases = ['A', 'C', 'G', 'T']
self.bases_to_arr = dict(A=numpy.array([1, 0, 0, 0]), C=numpy.array([0, 1, 0, 0]), G=numpy.array([0, 0, 1, 0]), T=numpy.array([0, 0, 0, 1]), N=numpy.array([0.25, 0.25, 0.25, 0.25]))
self.chrom_to_lens = {'seq0': 10, 'seq1': 25, 'seq2': 24}
self.in_memory = False
def _get_small_genome(self):
return HDF5Genome('amber/utils/tests/files/small_genome.h5', in_memory=self.in_memory)
def test_load_mixed_case_sequence(self):
expected = 'AANNCCTTGG'
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq0', 0, 10)
self.assertEqual(expected, observed)
def test_load_rc(self):
expected = 'CCAAGGNNTT'
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq0', 0, 10, '-')
self.assertEqual(expected, observed)
def test_load_sequence0(self):
expected = 'C'
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq1', 5, 6)
self.assertEqual(expected, observed)
def test_load_sequence1(self):
expected = 'A'
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq2', 0, 1)
self.assertEqual(expected, observed)
def test_load_sequence2(self):
expected = ((((('A' * 5) + ('C' * 5)) + ('T' * 5)) + ('G' * 5)) + ('N' * 5))
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq1', 0, 25)
self.assertEqual(expected, observed)
def test_load_sequence3(self):
expected = ('ACTG' * 6)
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq2', 0, 24)
self.assertEqual(expected, observed)
def test_load_sequence4(self):
expected = ((('T' * 5) + ('G' * 5)) + ('N' * 5))
g = self._get_small_genome()
observed = g.get_sequence_from_coords('seq1', 10, 25)
self.assertEqual(expected, observed)
def test_length(self):
expected = sum(self.chrom_to_lens.values())
g = self._get_small_genome()
observed = len(g)
self.assertEqual(expected, observed)
def test_coords_flipped(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 2, 1)
self.assertFalse(observed)
def test_end_coord_too_large(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 9, 11)
self.assertFalse(observed)
def test_both_coords_too_large(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 11, 14)
self.assertFalse(observed)
def test_coords_too_large_negative(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', (- 100), (- 99))
self.assertFalse(observed)
def test_coords_negative_flipped(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', (- 1), (- 2))
self.assertFalse(observed)
def test_coords_negative(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', (- 2), (- 1))
self.assertFalse(observed)
def test_bad_chrom(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq-1', 1, 2)
self.assertFalse(observed)
def test_too_small(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 1, 1)
self.assertFalse(observed)
def test_first_coord(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 0, 1)
self.assertTrue(observed)
def test_last_coord(self):
g = self._get_small_genome()
observed = g.coords_are_valid('seq0', 9, 10)
self.assertTrue(observed)
def test_in_memory(self):
self.assertFalse(self.in_memory)
def test_genome_in_memory(self):
g = self._get_small_genome()
self.assertFalse(g.in_memory)
|
class TestHDF5GenomeInMemory(TestHDF5Genome):
def setUp(self):
super(TestHDF5GenomeInMemory, self).setUp()
self.in_memory = True
def test_in_memory(self):
self.assertTrue(self.in_memory)
def test_genome_in_memory(self):
g = self._get_small_genome()
self.assertTrue(g.in_memory)
|
class Amber():
'The main wrapper class for AMBER\n\n This class facilitates the GUI and TUI caller, and should always be maintained\n\n Parameters\n ----------\n types: dict\n specs: dict\n\n Attributes\n ----------\n type_dict: dict\n is_built: bool\n model_space: amber.architect.ModelSpace\n controller: amber.architect.BaseController\n\n\n Example\n ----------\n See the usage sketch following this class definition.\n '
def __init__(self, types, specs=None):
self.type_dict = types
self.is_built = False
self.model_space = None
self.controller = None
self.model_fn = None
self.knowledge_fn = None
self.reward_fn = None
self.manager = None
self.env = None
self.session = Session()
try:
K.set_session(self.session)
except Exception as e:
print(('Failed to set Keras backend because of %s' % e))
if (specs is not None):
self.from_dict(specs)
def from_dict(self, d):
assert (type(d) is dict)
print('BUILDING')
print(('-' * 10))
self.model_space = getter.get_model_space(d['model_space'])
self.controller = getter.get_controller(controller_type=self.type_dict['controller_type'], model_space=self.model_space, session=self.session, **d['controller'])
self.model_fn = getter.get_modeler(model_fn_type=self.type_dict['modeler_type'], model_space=self.model_space, session=self.session, controller=self.controller, **d['model_builder'])
self.knowledge_fn = getter.get_knowledge_fn(knowledge_fn_type=self.type_dict['knowledge_fn_type'], knowledge_data_dict=d['knowledge_fn']['data'], **d['knowledge_fn']['params'])
self.reward_fn = getter.get_reward_fn(reward_fn_type=self.type_dict['reward_fn_type'], knowledge_fn=self.knowledge_fn, **d['reward_fn'])
self.manager = getter.get_manager(manager_type=self.type_dict['manager_type'], model_fn=self.model_fn, reward_fn=self.reward_fn, data_dict=d['manager']['data'], session=self.session, **d['manager']['params'])
self.env = getter.get_train_env(env_type=self.type_dict['env_type'], controller=self.controller, manager=self.manager, **d['train_env'])
self.is_built = True
return self
def run(self):
assert self.is_built
self.env.train()
self.controller.save_weights(os.path.join(self.env.working_dir, 'controller_weights.h5'))
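# Hedged usage sketch for the Example section above. The `types` and `specs` keys mirror
# what `from_dict` consumes; the type strings and the spec dictionaries passed in are
# placeholders that must match what `amber.getter` actually accepts, not a tested
# configuration.
def _example_amber_run(model_space_spec, controller_spec, model_builder_spec,
                       knowledge_spec, reward_spec, manager_spec, train_env_spec):
    types = {'controller_type': 'GeneralController', 'modeler_type': 'EnasCnnModelBuilder',
             'knowledge_fn_type': 'zero', 'reward_fn_type': 'LossAucReward',
             'manager_type': 'EnasManager', 'env_type': 'EnasTrainEnv'}
    specs = {'model_space': model_space_spec, 'controller': controller_spec,
             'model_builder': model_builder_spec, 'knowledge_fn': knowledge_spec,
             'reward_fn': reward_spec, 'manager': manager_spec, 'train_env': train_env_spec}
    amb = Amber(types=types, specs=specs)
    amb.run()  # trains the controller and saves controller_weights.h5 to the working dir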
|
def convert_file(input_file, output_file):
fasta = pyfaidx.Fasta(input_file)
h5 = h5py.File(output_file, 'w')
for k in fasta.keys():
s = str(fasta[k][:].seq).upper()
ds = h5.create_dataset(k, (len(s),), dtype='S1')
for i in range(len(s)):
ds[i] = numpy.string_(s[i])
h5.close()
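# Hedged usage sketch: converting a FASTA file to the per-base HDF5 layout read by
# HDF5Genome. The paths below are borrowed from the test fixtures and are illustrative.
def _example_convert_file():
    convert_file('amber/utils/tests/files/small_genome.fa',
                 'amber/utils/tests/files/small_genome.h5')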
|
def draw_samples(genome_file, bed_file, output_file, feature_name_file, bin_size, cvg_frac, n_examples, chrom_pad, chrom_pattern, max_unk, interval_file):
feature_name_set = set()
i_to_feature_name = list()
feature_name_to_i = dict()
i = 0
with open(feature_name_file, 'r') as read_file:
for line in read_file:
line = line.strip()
if line:
feature_name_set.add(line)
i_to_feature_name.append(line)
feature_name_to_i[line] = i
i += 1
n_feats = len(i_to_feature_name)
print('Loaded features', file=sys.stderr)
genome = pyfaidx.Fasta(genome_file)
chroms = list()
chrom_lens = list()
max_examples = 0
for k in genome.keys():
if (chrom_pattern.match(k) is not None):
l = len(genome[k])
if (l > (chrom_pad * 3)):
for s in ['+', '-']:
chroms.append((s, k))
chrom_lens.append(((l - (2 * chrom_pad)) - bin_size))
n_chrom = len(chroms)
chrom_to_i = {k: i for (i, k) in enumerate(chroms)}
print('Loaded chroms', file=sys.stderr)
chrom_bound_ivals = {k: list() for k in chrom_to_i.values()}
chrom_bound_ival_weights = {k: list() for k in chrom_to_i.values()}
chrom_lens = numpy.array(chrom_lens)
if (interval_file is None):
chrom_weighting_lens = chrom_lens.copy()
for (x, chrom_len) in zip(chroms, chrom_lens.tolist()):
c_i = chrom_to_i[x]
chrom_bound_ivals[c_i].append((chrom_pad, (chrom_len + chrom_pad)))
chrom_bound_ival_weights[c_i].append(chrom_len)
else:
chrom_weighting_lens = numpy.zeros_like(chrom_lens)
with open(interval_file, 'r') as read_file:
for (line_i, line) in enumerate(read_file):
line = line.strip()
if line:
if (not line.startswith('#')):
line = line.split('\t')
if (len(line) != 6):
s = 'Found that line #{} has {} elements and not 6'.format(line_i, len(line))
raise ValueError(s)
(chrom, start, end, _, _, strand) = line
start = int(start)
end = int(end)
(start, end) = (min(start, (end - 1)), max((start + 1), end))
if ((strand, chrom) in chrom_to_i):
i = chrom_to_i[(strand, chrom)]
start = max(start, chrom_pad)
end = min(end, ((chrom_lens[i] - chrom_pad) - bin_size))
if ((end - start) <= 0):
continue
chrom_bound_ivals[i].append((start, end))
dist = abs((end - start))
chrom_weighting_lens[i] += dist
chrom_bound_ival_weights[i].append(dist)
print('Loaded chrom ivals', file=sys.stderr)
chrom_weights = (chrom_weighting_lens / chrom_weighting_lens.sum())
max_examples = numpy.sum(chrom_weighting_lens)
if (max_examples < n_examples):
msg = 'Got {} max examples possible, but need {} examples'.format(max_examples, n_examples)
raise ValueError(msg)
ivt = {k: {kk: bx.intervals.intersection.IntervalTree() for kk in feature_name_to_i.values()} for k in chrom_to_i.values()}
with gzip.open(bed_file, 'rt') as read_file:
for line in read_file:
line = line.strip()
if (not line.startswith('#')):
line = line.split('\t')
(chrom, start, end, name) = line[:4]
if (len(line) >= 6):
if (line[5] == '.'):
strand = ['+', '-']
else:
strand = [line[5]]
else:
strand = ['+', '-']
start = int(start)
end = int(end)
if (name in feature_name_set):
for x in strand:
i = chrom_to_i[(x, chrom)]
ivt[i][feature_name_to_i[name]].insert_interval(bx.intervals.intersection.Interval(start, end))
print('Loaded labels', file=sys.stderr)
outputs = list()
i = 0
while (i < n_examples):
c_i = numpy.random.choice(n_chrom, p=chrom_weights)
(strand, chrom) = chroms[c_i]
try:
ival_bin_i = numpy.random.choice(len(chrom_bound_ivals[c_i]), p=(numpy.array(chrom_bound_ival_weights[c_i]).flatten() / chrom_weighting_lens[c_i]))
except Exception:
print('***FAILED ON RANDOM CHOICE', c_i, chrom_bound_ivals[c_i], chrom_weights, chrom_weighting_lens, sep='\n', flush=True, file=sys.stderr)
raise RuntimeError()
(start, end) = chrom_bound_ivals[c_i].pop(ival_bin_i)
cur_weight = chrom_bound_ival_weights[c_i].pop(ival_bin_i)
try:
pos = (numpy.random.choice((end - start)) + start)
except Exception:
print('***FAILED ON RANDOM CHOICE', c_i, chrom_bound_ivals[c_i], chrom_weights, chrom_weighting_lens, sep='\n', flush=True, file=sys.stderr)
raise RuntimeError()
if ((end - start) > 1):
if (pos == start):
chrom_bound_ival_weights[c_i].append((cur_weight - 1))
chrom_bound_ivals[c_i].append(((start + 1), end))
elif (pos == (end - 1)):
chrom_bound_ival_weights[c_i].append((cur_weight - 1))
chrom_bound_ivals[c_i].append((start, (end - 1)))
else:
chrom_bound_ivals[c_i].append((start, pos))
chrom_bound_ival_weights[c_i].append((pos - start))
chrom_bound_ivals[c_i].append(((pos + 1), end))
chrom_bound_ival_weights[c_i].append((end - (pos + 1)))
start = pos
end = (pos + bin_size)
chrom_weighting_lens[c_i] -= 1
chrom_weights = (chrom_weighting_lens / numpy.sum(chrom_weighting_lens))
cvg = numpy.zeros(n_feats)
for feat_i in feature_name_to_i.values():
for x in ivt[c_i][feat_i].find(start, end):
cvg[feat_i] += (min(x.end, end) - max(x.start, start))
cvg /= bin_size
cvg = (cvg > cvg_frac).astype(int).tolist()
outputs.append((chrom, start, end, strand, *cvg))
i += 1
if ((i % 10000) == 0):
print(i)
print('Loaded all examples.', file=sys.stderr)
with open(output_file, 'w') as write_file:
for x in sorted(outputs):
x = [str(y) for y in list(x)]
write_file.write(('\t'.join(x) + '\n'))
print('Wrote examples.', file=sys.stderr)
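# Hedged usage sketch: drawing labeled genomic bins. Every argument value here is a
# placeholder; `chrom_pattern` must be a compiled regex matching the chromosome names
# to sample from.
def _example_draw_samples():
    import re
    draw_samples(genome_file='hg19.fa', bed_file='peaks.bed.gz',
                 output_file='samples.tsv', feature_name_file='features.txt',
                 bin_size=1000, cvg_frac=0.5, n_examples=10000,
                 chrom_pad=10000, chrom_pattern=re.compile(r'^chr[0-9XY]+$'),
                 max_unk=0.1, interval_file=None)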
|
def split_samples(input_file, feature_name_file, output_dir):
i_to_feat = list()
with open(feature_name_file, 'r') as read_file:
for line in read_file:
line = line.strip()
if line:
i_to_feat.append(line)
feat_to_i = dict()
for (i, feat) in enumerate(i_to_feat):
feat_to_i[feat] = i
i_to_fp = list()
for feat in i_to_feat:
f = '{}.txt'.format(feat)
f = os.path.join(output_dir, f)
i_to_fp.append(open(f, 'w'))
with open(input_file, 'r') as read_file:
for line in read_file:
line = line.strip()
if line:
line = line.split('\t')
stub = ('\t'.join(line[:4]) + '\t')
for (i, fp) in enumerate(i_to_fp):
fp.write(((stub + line[(4 + i)]) + '\n'))
for x in i_to_fp:
x.close()
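# Hedged usage sketch: splitting the multi-feature sample table produced by draw_samples
# into one label file per feature. The paths are placeholders; the output directory must
# exist before the per-feature files are opened.
def _example_split_samples():
    os.makedirs('./labels_by_feature', exist_ok=True)
    split_samples(input_file='samples.tsv',
                  feature_name_file='features.txt',
                  output_dir='./labels_by_feature')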
|
def download_data_from_s3(task):
'Download task data from S3 into the current working directory, skipping files that already exist'
s3_base = 'https://pde-xd.s3.amazonaws.com'
download_directory = '.'
if (task == 'ECG'):
data_files = ['challenge2017.pkl']
s3_folder = 'ECG'
elif (task == 'satellite'):
data_files = ['satellite_train.npy', 'satellite_test.npy']
s3_folder = 'satellite'
elif (task == 'deepsea'):
data_files = ['deepsea_filtered.npz']
s3_folder = 'deepsea'
else:
raise NotImplementedError
for data_file in data_files:
if (not os.path.exists(data_file)):
fileurl = ((((s3_base + '/') + s3_folder) + '/') + data_file)
urlretrieve(fileurl, data_file)
return None
|
def main():
task = sys.argv[1]
download_data_from_s3(task)
|
def convert_timedelta(duration):
(days, seconds) = (duration.days, duration.seconds)
hours = ((days * 24) + (seconds // 3600))
minutes = ((seconds % 3600) // 60)
seconds = (seconds % 60)
return (hours, minutes, seconds)
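# Hedged usage sketch: formatting an elapsed wall-clock duration as (hours, minutes, seconds).
def _example_convert_timedelta():
    import datetime
    start = datetime.datetime(2021, 1, 1, 0, 0, 0)
    end = datetime.datetime(2021, 1, 2, 3, 30, 15)
    (hours, minutes, seconds) = convert_timedelta(end - start)
    assert (hours, minutes, seconds) == (27, 30, 15)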
|
def dataset_split(set_, train_prop, test_prop):
df = pd.read_csv('./data/final_df.csv')
df['genename_num'] = df['genename'].str.cat(df['Number'].astype(str), sep='-')
samp_lst = df['genename_num'].tolist()
np.random.seed(115)
np.random.shuffle(samp_lst)
num_train = int((len(samp_lst) * train_prop))
(train, test_val) = (samp_lst[:num_train], samp_lst[num_train:])
test_prop /= (1 - train_prop)
num_test = int((len(test_val) * test_prop))
(test, val) = (test_val[:num_test], test_val[num_test:])
if (set_ == 'train'):
return train
if (set_ == 'val'):
return val
if (set_ == 'test'):
return test
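# Hedged usage sketch: with the fixed seed and the same proportions, repeated calls yield
# reproducible and disjoint train/val/test gene lists. Note the CSV path inside
# dataset_split is hard-coded to ./data/final_df.csv.
def _example_dataset_split():
    train = dataset_split('train', train_prop=0.7, test_prop=0.15)
    val = dataset_split('val', train_prop=0.7, test_prop=0.15)
    test = dataset_split('test', train_prop=0.7, test_prop=0.15)
    assert not (set(train) & set(val)) and not (set(train) & set(test))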
|