class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
        """
        Parameters
        ----------
        radius : float
            radius of the balls
        nsample : int
            maximum number of features in the balls
        xyz : torch.Tensor
            (B, N, 3) xyz coordinates of the features
        new_xyz : torch.Tensor
            (B, npoint, 3) centers of the ball query

        Returns
        -------
        torch.Tensor
            (B, npoint, nsample) tensor with the indices of the features that form the query balls
        """
output = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_out):
return ()
|
class QueryAndGroup(nn.Module):
    """
    Groups with a ball query of radius

    Parameters
    ----------
    radius : float32
        Radius of ball
    nsample : int32
        Maximum number of features to gather in the ball
    """
def __init__(self, radius, nsample, use_xyz=True):
super(QueryAndGroup, self).__init__()
(self.radius, self.nsample, self.use_xyz) = (radius, nsample, use_xyz)
def forward(self, xyz, new_xyz, features=None):
        """
        Parameters
        ----------
        xyz : torch.Tensor
            xyz coordinates of the features (B, N, 3)
        new_xyz : torch.Tensor
            centroids (B, npoint, 3)
        features : torch.Tensor
            Descriptors of the features (B, C, N)

        Returns
        -------
        new_features : torch.Tensor
            (B, 3 + C, npoint, nsample) tensor
        """
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze((- 1))
if (features is not None):
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1)
else:
new_features = grouped_features
else:
            assert self.use_xyz, 'Cannot have both features=None and use_xyz=False; there would be nothing to group!'
new_features = grouped_xyz
return new_features
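# Added usage sketch (not from the original source): shapes only. It cannot run
# without the compiled `_ext` CUDA extension and a `grouping_operation`
# implementation, so it is left as a commented example on illustrative tensors.
# xyz = torch.randn(8, 1024, 3).cuda()      # B=8, N=1024 input points
# new_xyz = torch.randn(8, 256, 3).cuda()   # npoint=256 query centers
# feats = torch.randn(8, 64, 1024).cuda()   # C=64 descriptors per point
# grouper = QueryAndGroup(radius=0.2, nsample=32, use_xyz=True)
# out = grouper(xyz, new_xyz, feats)        # -> (8, 3 + 64, 256, 32)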
|
class GroupAll(nn.Module):
    """
    Groups all features

    Parameters
    ----------
    use_xyz : bool
        whether to concatenate the xyz coordinates to the grouped features
    """
def __init__(self, use_xyz=True):
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz, new_xyz, features=None):
        """
        Parameters
        ----------
        xyz : torch.Tensor
            xyz coordinates of the features (B, N, 3)
        new_xyz : torch.Tensor
            Ignored
        features : torch.Tensor
            Descriptors of the features (B, C, N)

        Returns
        -------
        new_features : torch.Tensor
            (B, C + 3, 1, N) tensor
        """
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if (features is not None):
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
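# Added self-contained shape check for GroupAll (it uses only plain torch ops,
# so it runs on CPU); the tensor sizes are illustrative.
if __name__ == '__main__':
    import torch
    _xyz = torch.randn(2, 100, 3)     # (B, N, 3)
    _feats = torch.randn(2, 16, 100)  # (B, C, N)
    _out = GroupAll(use_xyz=True)(_xyz, None, _feats)
    assert _out.shape == (2, 3 + 16, 1, 100)  # (B, C + 3, 1, N)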
|
class HiddenPrints():
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
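# Added usage example: everything printed inside the context manager is
# redirected to os.devnull, and stdout is restored on exit.
if __name__ == '__main__':
    with HiddenPrints():
        print('this line is suppressed')
    print('stdout is back')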
|
def multiprocess(func):
p = Pool(80)
p.map(func, idx_list)
p.close()
p.join()
|
def pds(idx):
    if not os.path.exists(os.path.join(save_path_list[idx], '%d.npy' % num_point)):  # assumed intent of the disabled 'if 1:' guard: skip already-processed shapes
print(os.path.join(save_path_list[idx], ('%d.npy' % num_point)), ' is preparing.')
ms_set = pymeshlab.MeshSet()
        ms_set.load_new_mesh(os.path.join(path_list[idx], 'scaled_model.off'))
ms_set.generate_sampling_poisson_disk(samplenum=int((num_point / (1 - 0.006))), exactnumflag=True)
pc = np.array(ms_set.current_mesh().vertex_matrix())
if (not os.path.exists(save_path_list[idx])):
os.makedirs(save_path_list[idx])
np.save(os.path.join(save_path_list[idx], ('%d.npy' % num_point)), pc.astype(np.float32))
print(os.path.join(save_path_list[idx], ('%d.npy' % num_point)), ' is prepared.')
del pc, ms_set
else:
print('skip ', os.path.join(save_path_list[idx], ('%d.npy' % num_point)))
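# The two helpers above read module-level globals defined elsewhere in the
# original script; a hedged sketch of what they could look like (paths, counts,
# and num_point are illustrative, not from the source):
# num_point = 2048
# path_list = ['data/shapes/%d' % i for i in range(100)]    # dirs containing scaled_model.off
# save_path_list = ['data/pds/%d' % i for i in range(100)]  # output dirs
# idx_list = list(range(len(path_list)))
# multiprocess(pds)  # Poisson-disk sample every mesh in parallel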
|
class HiddenPrints():
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
|
def multiprocess(func):
p = Pool(20)
p.map(func, idx_list)
p.close()
p.join()
|
def sample(idx):
mesh = trimesh.load(os.path.join(path_list[idx], 'scaled_model.off'))
points = mesh.sample(int(num_sample))
if (not os.path.exists(save_path_list[idx])):
        os.makedirs(save_path_list[idx])
np.save(os.path.join(save_path_list[idx], 'points.npy'), points.astype(np.float32))
print(os.path.join(save_path_list[idx], 'points.npy'))
|
class HiddenPrints():
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
|
def multiprocess(func):
p = Pool(20)
p.map(func, idx_list)
p.close()
p.join()
|
def sample(idx):
    boundary_points_list = []
    df_list = []
    closest_points_list = []
    for i in range(3):
        ratio = ratio_list[i]
        std = std_list[i]
        mesh = trimesh.load(os.path.join(path_list[idx], 'scaled_model.off'))
        points = mesh.sample(int(num_sample * ratio))
        noise = np.random.randn(*points.shape) * std
        boundary_points = points + noise
        (df, fi, bc) = pcu.closest_points_on_mesh(boundary_points, mesh.vertices, mesh.faces)
        closest_points = pcu.interpolate_barycentric_coords(mesh.faces, fi, bc, mesh.vertices)
        boundary_points_list.append(boundary_points)
        df_list.append(df)
        closest_points_list.append(closest_points)
    boundary_points_list = np.concatenate(boundary_points_list, axis=0)
    df_list = np.concatenate(df_list, axis=0)
    closest_points_list = np.concatenate(closest_points_list, axis=0)
    if not os.path.exists(save_path_list[idx]):
        os.makedirs(save_path_list[idx])
    np.savez(os.path.join(save_path_list[idx], 'sample_gauss.npz'), points=boundary_points_list.astype(np.float32), df=df_list.astype(np.float32), closest_points=closest_points_list.astype(np.float32))
    print(os.path.join(save_path_list[idx], 'sample_gauss.npz'))
|
def as_mesh(scene_or_mesh):
    """
    Convert a possible scene to a mesh.

    If conversion occurs, the returned mesh has only vertex and face data.
    Suggested by https://github.com/mikedh/trimesh/issues/507
    """
if isinstance(scene_or_mesh, trimesh.Scene):
if (len(scene_or_mesh.geometry) == 0):
mesh = None
else:
mesh = trimesh.util.concatenate(tuple((trimesh.Trimesh(vertices=g.vertices, faces=g.faces) for g in scene_or_mesh.geometry.values())))
else:
assert isinstance(scene_or_mesh, trimesh.Trimesh)
mesh = scene_or_mesh
return mesh
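# Added usage sketch: trimesh.load returns a Scene for multi-geometry formats
# (e.g. an OBJ with several groups); as_mesh collapses it into a single
# Trimesh. The file path is illustrative.
# mesh = as_mesh(trimesh.load('model.obj'))
# if mesh is not None:
#     print(mesh.vertices.shape, mesh.faces.shape)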
|
def scaled_off(idx):
    scene = trimesh.load(os.path.join(base_path_1[idx], 'isosurf.obj'))
    mesh = as_mesh(scene)
total_size = (mesh.bounds[1] - mesh.bounds[0]).max()
centers = ((mesh.bounds[1] + mesh.bounds[0]) / 2)
mesh.apply_translation((- centers))
mesh.apply_scale((1 / total_size))
if (not os.path.exists(save_path_1[idx])):
os.makedirs(save_path_1[idx])
mesh.export(os.path.join(save_path_1[idx], 'scaled_model.off'))
print(os.path.join(save_path_1[idx], 'scaled_model.off'))
|
def multiprocess(func):
p = Pool(16)
p.map(func, idx_list)
p.close()
p.join()
|
def split(x):
return x.lower().split()
|
def length(sequence):
used = tf.sign(tf.reduce_max(tf.abs(sequence), reduction_indices=2))
length = tf.reduce_sum(used, reduction_indices=1)
length = tf.cast(length, tf.int32)
return length
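# Added NumPy illustration of the padding trick in `length` (kept separate from
# the TF 0.x/1.x graph code above): a timestep counts as "used" iff any
# embedding dimension is non-zero, so summing the 0/1 mask gives the true
# sequence length of a zero-padded batch.
if __name__ == '__main__':
    import numpy as np
    _seq = np.zeros((1, 4, 3))
    _seq[0, :2] = 1.0                          # 2 real timesteps, 2 padded with zeros
    _used = np.sign(np.abs(_seq).max(axis=2))  # [[1., 1., 0., 0.]]
    assert _used.sum(axis=1).astype(np.int32).tolist() == [2]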
|
def cost(output, target):
cross_entropy = (target * tf.log(output))
cross_entropy = (- tf.reduce_sum(cross_entropy, reduction_indices=2))
mask = tf.sign(tf.reduce_max(tf.abs(target), reduction_indices=2))
cross_entropy *= mask
cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)
cross_entropy /= tf.reduce_sum(mask, reduction_indices=1)
return tf.reduce_mean(cross_entropy, name='loss')
|
def activate(outputs, weight_shape, bias_shape, activation=tf.nn.softmax):
weights = tf.get_variable('weights', shape=weight_shape, initializer=tf.random_normal_initializer())
biases = tf.get_variable('biases', shape=bias_shape, initializer=tf.constant_initializer(0.0))
if (outputs.get_shape().ndims == 2):
result = activation((tf.matmul(outputs, weights) + biases))
else:
result = activation((tf.reshape(tf.matmul(tf.reshape(outputs, [(- 1), weight_shape[0]]), weights), [(- 1), outputs.get_shape().as_list()[1], weight_shape[1]]) + biases))
return result
|
def rmse_loss(outputs, targets):
return tf.sqrt(tf.reduce_mean(tf.square(tf.sub(targets, outputs))))
|
def pad(x, max_length, pad_constant=(- 1)):
x = list(x)
for i in range(len(x)):
x[i] += ([pad_constant] * (max_length - len(x[i])))
x[i] = np.array(x[i])
return x
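# Added runnable example: `pad` right-pads each ragged sequence to max_length
# with pad_constant and converts every row to an ndarray.
if __name__ == '__main__':
    _padded = pad([[1, 2], [3]], 4, pad_constant=-1)
    assert [p.tolist() for p in _padded] == [[1, 2, -1, -1], [3, -1, -1, -1]]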
|
def get_batch_pos(obj, size=5):
idx = np.random.choice(range(len(obj.sent)), size=size, replace=False)
p = pad(obj.pos[idx], obj.max_length, (- 1))
s = pad(obj.sent[idx], obj.max_length, (obj.vec.shape[0] - 1))
c = pad(obj.chun[idx], obj.max_length, (- 1))
return (s, p, c)
|
def get_batch_sent(obj, size=5):
idx = np.random.choice(range(len(obj.sent1)), size=size, replace=False)
s1 = pad(obj.sent1[idx], obj.max_length, (obj.vec.shape[0] - 1))
s2 = pad(obj.sent2[idx], obj.max_length, (obj.vec.shape[0] - 1))
r = obj.rel[idx]
e = obj.ent[idx]
return (s1, s2, r.values, e.values)
|
class JMT():
def __init__(self, dim, reg_lambda, lr=0.01):
self.dim = dim
self.reg_lambda = reg_lambda
self.lr = lr
def load_data(self):
data = np.load('data/data.npz')['data'].item()
self.sent = data['word_level']['sent'].values
self.pos = data['word_level']['pos']
self.i2p = data['word_level']['i2p']
self.i2c = data['word_level']['i2c']
self.chun = data['word_level']['chunk']
self.sent1 = data['sent_level']['sent1']
self.sent2 = data['sent_level']['sent2']
self.i2e = data['sent_level']['i2e']
self.rel = data['sent_level']['rel']
self.ent = data['sent_level']['entailment']
self.w2i = data['w2i']
self.vec = np.array((data['vec'] + [([0] * 300)]))
self.max_length = max([len(i) for i in self.sent])
print('***Data loaded***')
def build_model(self):
' Builds the whole computational graph '
def sentence_op(inputs, t_pos, t_chunk):
with tf.variable_scope('pos'):
embeddings = tf.constant(self.vec, dtype=tf.float32)
embeds = tf.nn.embedding_lookup(embeddings, inputs)
fw_lstm = MyLSTM(self.dim, state_is_tuple=True)
bw_lstm = MyLSTM(self.dim, state_is_tuple=True)
(outputs, _) = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_lstm, cell_bw=bw_lstm, inputs=embeds, sequence_length=length(embeds), dtype=tf.float32)
concat_outputs = tf.concat(2, outputs)
y_pos = activate(concat_outputs, [(self.dim * 2), len(self.i2p)], [len(self.i2p)])
t_pos_sparse = tf.one_hot(indices=t_pos, depth=len(self.i2p), axis=(- 1))
loss = cost(y_pos, t_pos_sparse)
loss += tf.reduce_sum([(self.reg_lambda * tf.nn.l2_loss(x)) for x in tf.trainable_variables()])
optimize_op = tf.train.AdagradOptimizer(self.lr).minimize(loss)
with tf.variable_scope('chunk'):
inputs1 = tf.concat(2, [embeds, concat_outputs, y_pos])
fw_lstm = MyLSTM(self.dim, state_is_tuple=True)
bw_lstm = MyLSTM(self.dim, state_is_tuple=True)
(outputs1, _) = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_lstm, cell_bw=bw_lstm, inputs=inputs1, sequence_length=length(embeds), dtype=tf.float32)
concat_outputs1 = tf.concat(2, outputs1)
y_chunk = activate(concat_outputs1, [(self.dim * 2), len(self.i2c)], [len(self.i2c)])
t_chunk_sparse = tf.one_hot(indices=t_chunk, depth=len(self.i2c), axis=(- 1))
loss1 = cost(y_chunk, t_chunk_sparse)
loss1 += tf.reduce_sum([(self.reg_lambda * tf.nn.l2_loss(x)) for x in tf.trainable_variables()])
optimize_op1 = tf.train.AdagradOptimizer(self.lr).minimize(loss1)
with tf.variable_scope('relatedness'):
with tf.variable_scope('layer_1'):
inputs2 = tf.concat(2, [embeds, concat_outputs1, y_pos, y_chunk])
fw_lstm = MyLSTM(self.dim, state_is_tuple=True)
bw_lstm = MyLSTM(self.dim, state_is_tuple=True)
(outputs2, _) = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_lstm, cell_bw=bw_lstm, inputs=inputs2, sequence_length=length(embeds), dtype=tf.float32)
concat_outputs2 = tf.concat(2, outputs2)
with tf.variable_scope('layer_2'):
inputs3 = tf.concat(2, [embeds, concat_outputs2, y_pos, y_chunk])
fw_lstm1 = MyLSTM(self.dim, state_is_tuple=True)
bw_lstm1 = MyLSTM(self.dim, state_is_tuple=True)
(outputs3, _) = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_lstm1, cell_bw=bw_lstm1, inputs=inputs3, sequence_length=length(embeds), dtype=tf.float32)
concat_outputs3 = tf.concat(2, outputs3)
s = tf.reduce_max(concat_outputs3, reduction_indices=1)
with tf.variable_scope('layer_3'):
inputs4 = tf.concat(2, [embeds, concat_outputs3, y_pos, y_chunk])
fw_lstm2 = MyLSTM(self.dim, state_is_tuple=True)
bw_lstm2 = MyLSTM(self.dim, state_is_tuple=True)
(outputs4, _) = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_lstm2, cell_bw=bw_lstm2, inputs=inputs4, sequence_length=length(embeds), dtype=tf.float32)
                    concat_outputs4 = tf.concat(2, outputs4)
s1 = tf.reduce_max(concat_outputs4, reduction_indices=1)
return (s, s1, optimize_op, optimize_op1, loss, loss1, y_pos, y_chunk)
with tf.variable_scope('sentence') as scope:
self.inp = tf.placeholder(shape=[None, self.max_length], dtype=tf.int32, name='input')
self.t_p = tf.placeholder(shape=[None, self.max_length], dtype=tf.int32, name='t_pos')
self.t_c = tf.placeholder(shape=[None, self.max_length], dtype=tf.int32, name='t_chunk')
(s11, s12, self.optimize_op, self.optimize_op1, self.loss, self.loss1, self.y_pos, self.y_chunk) = sentence_op(self.inp, self.t_p, self.t_c)
scope.reuse_variables()
self.inp1 = tf.placeholder(shape=[None, self.max_length], dtype=tf.int32, name='input1')
(s21, s22) = sentence_op(self.inp1, self.t_p, self.t_c)[:2]
d = tf.concat(1, [tf.abs(tf.sub(s11, s21)), tf.mul(s11, s21)])
d1 = tf.concat(1, [tf.sub(s12, s22), tf.mul(s12, s22)])
with tf.variable_scope('relation'):
self.y_rel = tf.squeeze(activate(d, [(self.dim * 4), 1], [1], activation=tf.nn.relu))
self.t_rel = tf.placeholder(shape=[None], dtype=tf.float32)
self.loss2 = rmse_loss(self.y_rel, self.t_rel)
self.loss2 += tf.reduce_sum([(self.reg_lambda * tf.nn.l2_loss(x)) for x in tf.trainable_variables()])
self.optimize_op2 = tf.train.AdagradOptimizer(self.lr).minimize(self.loss2)
with tf.variable_scope('entailment'):
self.t_ent = tf.placeholder(shape=[None], dtype=tf.int32)
t_ent_sparse = tf.one_hot(indices=self.t_ent, depth=3, axis=(- 1))
self.y_ent = activate(d1, [(self.dim * 4), 3], [3])
self.loss3 = (- tf.reduce_mean((t_ent_sparse * tf.log(self.y_ent))))
self.loss3 += tf.reduce_sum([(self.reg_lambda * tf.nn.l2_loss(x)) for x in tf.trainable_variables()])
self.optimize_op3 = tf.train.AdagradOptimizer(self.lr).minimize(self.loss3)
print('***Model built***')
def get_predictions(self, graph, task_desc):
resp = dict()
saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
saver = tf.train.import_meta_graph('saves/model.ckpt.meta')
saver.restore(sess, tf.train.latest_checkpoint('./saves'))
if ('pos' in task_desc):
inp = task_desc['pos'].lower().split()
inputs = [([self.w2i[i] for i in inp] + ([(self.vec.shape[0] - 1)] * (self.max_length - len(inp))))]
preds = sess.run(self.y_pos, {self.inp: inputs})[0]
preds = np.argmax(preds, axis=(- 1))[:len(inp)]
preds = [self.i2p[i] for i in preds]
resp['pos'] = preds
if ('chunk' in task_desc):
inp = task_desc['chunk'].lower().split()
inputs = [([self.w2i[i] for i in inp] + ([(self.vec.shape[0] - 1)] * (self.max_length - len(inp))))]
preds = sess.run(self.y_chunk, {self.inp: inputs})[0]
preds = np.argmax(preds, axis=(- 1))[:len(inp)]
preds = [self.i2c[i] for i in preds]
resp['chunk'] = preds
if ('relatedness' in task_desc):
inp1 = task_desc['relatedness'][0].lower().split()
inputs1 = [([self.w2i[i] for i in inp1] + ([(self.vec.shape[0] - 1)] * (self.max_length - len(inp1))))]
inp2 = task_desc['relatedness'][1].lower().split()
inputs2 = [([self.w2i[i] for i in inp2] + ([(self.vec.shape[0] - 1)] * (self.max_length - len(inp2))))]
preds = sess.run(self.y_rel, {self.inp: inputs1, self.inp1: inputs2})
resp['relatedness'] = preds
if ('entailment' in task_desc):
inp1 = task_desc['entailment'][0].lower().split()
inputs1 = [([self.w2i[i] for i in inp1] + ([(self.vec.shape[0] - 1)] * (self.max_length - len(inp1))))]
inp2 = task_desc['entailment'][1].lower().split()
inputs2 = [([self.w2i[i] for i in inp2] + ([(self.vec.shape[0] - 1)] * (self.max_length - len(inp2))))]
preds = sess.run(self.y_ent, {self.inp: inputs1, self.inp1: inputs2})[0]
resp['entailment'] = self.i2e[np.argmax(preds)]
return resp
def train_model(self, graph, train_desc, resume=False):
saver = tf.train.Saver()
batch_size = train_desc['batch_size']
with tf.Session(graph=graph) as sess:
if resume:
saver = tf.train.import_meta_graph('saves/model.ckpt.meta')
saver.restore(sess, tf.train.latest_checkpoint('./saves'))
print('training resumed')
else:
sess.run(tf.global_variables_initializer())
if ('pos' in train_desc):
print('***Training POS layer***')
for i in range(train_desc['pos']):
(a, b, c) = get_batch_pos(self, batch_size)
(_, l) = sess.run([self.optimize_op, self.loss], {self.inp: a, self.t_p: b})
if ((i % 50) == 0):
print(l)
saver.save(sess, 'saves/model.ckpt')
if ('chunk' in train_desc):
print('***Training chunk layer***')
for i in range(train_desc['chunk']):
(a, b, c) = get_batch_pos(self, batch_size)
(_, l1) = sess.run([self.optimize_op1, self.loss1], {self.inp: a, self.t_p: b, self.t_c: c})
if ((i % 50) == 0):
print(l1)
saver.save(sess, 'saves/model.ckpt')
if ('relatedness' in train_desc):
print('***Training semantic relatedness***')
for i in range(train_desc['relatedness']):
(a, b, c, _) = get_batch_sent(self, batch_size)
(_, l2) = sess.run([self.optimize_op2, self.loss2], {self.inp: a, self.inp1: b, self.t_rel: c})
if ((i % 50) == 0):
print(l2)
saver.save(sess, 'saves/model.ckpt')
if ('entailment' in train_desc):
print('***Training semantic entailment***')
for i in range(train_desc['entailment']):
(a, b, _, c) = get_batch_sent(self, batch_size)
(_, l3) = sess.run([self.optimize_op3, self.loss3], {self.inp: a, self.inp1: b, self.t_ent: c})
if ((i % 50) == 0):
print(l3)
saver.save(sess, 'saves/model.ckpt')
|
class MyLSTM(tf.nn.rnn_cell.BasicLSTMCell):
def __call__(self, inputs, state, scope=None):
'LSTM as mentioned in paper.'
with vs.variable_scope((scope or 'basic_lstm_cell')):
if self._state_is_tuple:
(c, h) = state
else:
                (c, h) = array_ops.split(value=state, num_split=2, split_dim=1)
g = tf.concat(1, [inputs, h])
concat = linear([g], (4 * self._num_units), True, scope=scope)
(i, j, f, o) = array_ops.split(value=concat, num_split=4, split_dim=1)
new_c = ((c * sigmoid((f + self._forget_bias))) + (sigmoid(i) * self._activation(j)))
new_h = (self._activation(new_c) * sigmoid(o))
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat_v2([new_c, new_h], 1)
return (new_h, new_state)
|
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.lin0 = nn.Linear(3, nf[0])
self.resnet_block11 = HarmonicResNetBlock(nf[0], nf[0], max_order, n_rings, prev_order=0)
self.resnet_block12 = HarmonicResNetBlock(nf[0], nf[0], max_order, n_rings)
self.pool = ParallelTransportPool(1, scale1_transform)
self.resnet_block21 = HarmonicResNetBlock(nf[0], nf[1], max_order, n_rings)
self.resnet_block22 = HarmonicResNetBlock(nf[1], nf[1], max_order, n_rings)
self.resnet_block31 = HarmonicResNetBlock(nf[1], nf[1], max_order, n_rings)
self.resnet_block32 = HarmonicResNetBlock(nf[1], nf[1], max_order, n_rings)
self.unpool = ParallelTransportUnpool(1)
self.resnet_block41 = HarmonicResNetBlock((nf[1] + nf[0]), nf[0], max_order, n_rings)
self.resnet_block42 = HarmonicResNetBlock(nf[0], nf[0], max_order, n_rings, last_layer=True)
self.lin3 = nn.Linear(nf[0], 256)
self.lin4 = nn.Linear(256, num_nodes)
def forward(self, data):
x = data.pos
x = F.relu(self.lin0(x))
x = torch.stack((x, torch.zeros_like(x)), dim=(- 1)).unsqueeze(1)
data_scale0 = scale0_transform(data)
attributes = (data_scale0.edge_index, data_scale0.precomp, data_scale0.connection)
x = self.resnet_block11(x, *attributes)
x_prepool = self.resnet_block12(x, *attributes)
(x, data, data_pooled) = self.pool(x_prepool, data)
attributes_pooled = (data_pooled.edge_index, data_pooled.precomp, data_pooled.connection)
x = self.resnet_block21(x, *attributes_pooled)
x = self.resnet_block22(x, *attributes_pooled)
x = self.resnet_block31(x, *attributes_pooled)
x = self.resnet_block32(x, *attributes_pooled)
x = self.unpool(x, data)
x = torch.cat((x, x_prepool), dim=2)
x = self.resnet_block41(x, *attributes)
x = self.resnet_block42(x, *attributes)
x = magnitudes(x, keepdim=False)
x = x.sum(dim=1)
x = F.relu(self.lin3(x))
x = F.dropout(x, training=self.training)
x = self.lin4(x)
return F.log_softmax(x, dim=1)
|
def train(epoch):
model.train()
if (epoch == 60):
for param_group in optimizer.param_groups:
param_group['lr'] = 0.001
for data in progressbar.progressbar(train_loader):
data = data.to(device)
optimizer.zero_grad()
F.nll_loss(model(data), target).backward()
optimizer.step()
|
def test():
model.eval()
correct = 0
for (i, data) in progressbar.progressbar(enumerate(test_loader)):
pred = model(data.to(device)).max(1)[1]
correct += pred.eq(target).sum().item()
return (correct / (len(test_dataset) * num_nodes))
|
class FAUST(InMemoryDataset):
    """The FAUST humans dataset from the `"FAUST: Dataset and Evaluation for
    3D Mesh Registration"
    <http://files.is.tue.mpg.de/black/papers/FAUST2014.pdf>`_ paper,
    containing 100 watertight meshes representing 10 different poses for 10
    different subjects.

    .. note::

        Data objects hold mesh faces instead of edge indices.
        To convert the mesh to a graph, use the
        :obj:`torch_geometric.transforms.FaceToEdge` as :obj:`pre_transform`.
        To convert the mesh to a point cloud, use the
        :obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform` to
        sample a fixed number of points on the mesh faces according to their
        face area.

    Args:
        root (string): Root directory where the dataset should be saved.
        train (bool, optional): If :obj:`True`, loads the training dataset,
            otherwise the test dataset. (default: :obj:`True`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in the
            final dataset. (default: :obj:`None`)
    """
url = 'http://faust.is.tue.mpg.de/'
def __init__(self, root, train=True, transform=None, pre_transform=None, pre_filter=None):
super(FAUST, self).__init__(root, transform, pre_transform, pre_filter)
path = (self.processed_paths[0] if train else self.processed_paths[1])
(self.data, self.slices) = torch.load(path)
@property
def raw_file_names(self):
return 'faust.zip'
@property
def processed_file_names(self):
return ['training.pt', 'test.pt']
def download(self):
raise RuntimeError('Dataset not found. Please download {} from {} and move it to {}'.format(self.raw_file_names, self.url, self.raw_dir))
def process(self):
extract_zip(self.raw_paths[0], self.raw_dir, log=False)
path = osp.join(self.raw_dir, 'meshes')
path = osp.join(path, 'tr_reg_{0:03d}.ply')
data_list = []
for i in progressbar.progressbar(range(100)):
data = read_ply(path.format(i))
if ((self.pre_filter is not None) and (not self.pre_filter(data))):
continue
if (self.pre_transform is not None):
data = self.pre_transform(data)
data_list.append(data)
torch.save(self.collate(data_list[:80]), self.processed_paths[0])
torch.save(self.collate(data_list[80:]), self.processed_paths[1])
shutil.rmtree(osp.join(self.raw_dir, 'meshes'))
shutil.rmtree(osp.join(self.raw_dir, 'segs'))
|
class FAUSTRemeshed(InMemoryDataset):
    """The remeshed FAUST humans dataset from the paper `Multi-directional
    Geodesic Neural Networks via Equivariant Convolution`,
    containing 100 watertight meshes representing 10 different poses for 10
    different subjects.

    .. note::

        Data objects hold mesh faces instead of edge indices.
        To convert the mesh to a graph, use the
        :obj:`torch_geometric.transforms.FaceToEdge` as :obj:`pre_transform`.
        To convert the mesh to a point cloud, use the
        :obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform` to
        sample a fixed number of points on the mesh faces according to their
        face area.

    Args:
        root (string): Root directory where the dataset should be saved.
        train (bool, optional): If :obj:`True`, loads the training dataset,
            otherwise the test dataset. (default: :obj:`True`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in the
            final dataset. (default: :obj:`None`)
    """
url = 'https://surfdrive.surf.nl/files/index.php/s/KLSxAN0QEsfJuBV'
def __init__(self, root, train=True, transform=None, pre_transform=None, pre_filter=None):
super(FAUSTRemeshed, self).__init__(root, transform, pre_transform, pre_filter)
path = (self.processed_paths[0] if train else self.processed_paths[(- 1)])
(self.data, self.slices) = torch.load(path)
@property
def raw_file_names(self):
return 'FAUST_remeshed.zip'
@property
def processed_file_names(self):
return ['training.pt', 'test.pt']
def download(self):
raise RuntimeError('Dataset not found. Please download {} from {} and move it to {}'.format(self.raw_file_names, self.url, self.raw_dir))
def process(self):
extract_zip(self.raw_paths[0], self.raw_dir, log=False)
path = osp.join(self.raw_dir, 'shapes')
path = osp.join(path, 'tr_reg_{0:03d}.ply')
data_list = []
file_idx = 0
for i in progressbar.progressbar(range(100)):
data = read_ply(path.format(i))
if ((self.pre_filter is not None) and (not self.pre_filter(data))):
continue
if (self.pre_transform is not None):
data = self.pre_transform(data)
data_list.append(data)
if ((i == 79) or (i == 99)):
torch.save(self.collate(data_list), self.processed_paths[file_idx])
data_list = []
file_idx += 1
shutil.rmtree(osp.join(self.raw_dir, 'shapes'))
|
class ShapeSeg(InMemoryDataset):
    """The Shape Segmentation dataset proposed by Maron et al. in
    `"Convolutional neural networks on surfaces via seamless toric covers"
    <https://dl.acm.org/citation.cfm?id=3073616>`_,
    containing meshes from Adobe, SCAPE, FAUST, and MIT for training
    and SHREC shapes for testing.

    .. note::

        Data objects hold mesh faces instead of edge indices.
        To convert the mesh to a graph, use the
        :obj:`torch_geometric.transforms.FaceToEdge` as :obj:`pre_transform`.
        To convert the mesh to a point cloud, use the
        :obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform` to
        sample a fixed number of points on the mesh faces according to their
        face area.

    Args:
        root (string): Root directory where the dataset should be saved.
        train (bool, optional): If :obj:`True`, loads the training dataset,
            otherwise the test dataset. (default: :obj:`True`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in the
            final dataset. (default: :obj:`None`)
    """
mit_folders = {'crane': 18, 'squat1': 25, 'jumping': 15, 'squat2': 25, 'bouncing': 18, 'march1': 25, 'handstand': 18, 'march2': 25}
url = 'https://surfdrive.surf.nl/files/index.php/s/L68uSYpHtfO6dLa'
def __init__(self, root, train=True, transform=None, pre_transform=None, pre_filter=None):
super(ShapeSeg, self).__init__(root, transform, pre_transform, pre_filter)
path = (self.processed_paths[0] if train else self.processed_paths[1])
(self.data, self.slices) = torch.load(path)
@property
def raw_file_names(self):
return ['shapeseg.zip']
@property
def processed_file_names(self):
return ['training.pt', 'test.pt']
def download(self):
raise RuntimeError('Dataset not found. Please download {} from {} and move it to {}'.format(self.raw_file_names, self.url, self.raw_dir))
def process(self):
print('Extracting zip...')
extract_zip(self.raw_paths[0], self.raw_dir, log=False)
shapeseg_path = osp.join(self.raw_dir, 'ShapeSeg')
data_list = []
print('Processing Adobe')
adobe_path = osp.join(shapeseg_path, 'Adobe', 'raw')
extract_zip(osp.join(adobe_path, 'adobe.zip'), adobe_path)
adobe_meshes = osp.join(adobe_path, 'meshes')
adobe_meshes = osp.join(adobe_meshes, '{}.ply')
adobe_segs = osp.join(adobe_path, 'segs', '{}.pt')
for i in progressbar.progressbar(range(41)):
data = read_ply(adobe_meshes.format(i))
if ((self.pre_filter is not None) and (not self.pre_filter(data))):
continue
if (self.pre_transform is not None):
data = self.pre_transform(data)
data.y = torch.load(adobe_segs.format(i))
if hasattr(data, 'sample_idx'):
data.y = data.y[data.sample_idx]
data_list.append(data)
torch.save(self.collate(data_list), osp.join(shapeseg_path, 'adobe.pt'))
print('Processing FAUST')
faust_path = osp.join(shapeseg_path, 'FAUST', 'raw')
extract_zip(osp.join(faust_path, 'faust.zip'), faust_path)
faust_meshes = osp.join(faust_path, 'meshes')
faust_meshes = osp.join(faust_meshes, 'tr_reg_{0:03d}.ply')
faust_segs = torch.load(osp.join(faust_path, 'segs', 'faust_seg.pt'))
for i in progressbar.progressbar(range(100)):
data = read_ply(faust_meshes.format(i))
if ((self.pre_filter is not None) and (not self.pre_filter(data))):
continue
if (self.pre_transform is not None):
data = self.pre_transform(data)
data.y = faust_segs
if hasattr(data, 'sample_idx'):
data.y = data.y[data.sample_idx]
data_list.append(data)
torch.save(self.collate(data_list), osp.join(shapeseg_path, 'faust.pt'))
print('Processing MIT')
mit_path = osp.join(shapeseg_path, 'MIT', 'raw')
extract_zip(osp.join(mit_path, 'mit.zip'), mit_path)
mit_meshes = osp.join(mit_path, 'meshes')
mit_seg = osp.join(mit_path, 'segs')
for filename in progressbar.progressbar(osls(mit_meshes)):
data = read_obj(osp.join(mit_meshes, filename))
seg_path = osp.join(mit_seg, filename.replace('.obj', '.eseg'))
segs = torch.from_numpy(np.loadtxt(seg_path)).long()
data.y = edge_to_vertex_labels(data.face, segs, data.num_nodes)
if ((self.pre_filter is not None) and (not self.pre_filter(data))):
continue
if (self.pre_transform is not None):
data = self.pre_transform(data)
data_list.append(data)
torch.save(self.collate(data_list), osp.join(shapeseg_path, 'mit.pt'))
print('Processing SCAPE')
scape_path = osp.join(shapeseg_path, 'SCAPE', 'raw')
extract_zip(osp.join(scape_path, 'scape.zip'), scape_path)
scape_meshes = osp.join(scape_path, 'meshes')
scape_meshes = osp.join(scape_meshes, '{}.ply')
scape_segs = torch.load(osp.join(scape_path, 'segs', 'scape_seg.pt'))
for i in progressbar.progressbar(range(71)):
data = read_ply(scape_meshes.format(i))
if ((self.pre_filter is not None) and (not self.pre_filter(data))):
continue
if (self.pre_transform is not None):
data = self.pre_transform(data)
data.y = scape_segs
if hasattr(data, 'sample_idx'):
data.y = data.y[data.sample_idx]
data_list.append(data)
torch.save(self.collate(data_list), osp.join(shapeseg_path, 'scape.pt'))
torch.save(self.collate(data_list), self.processed_paths[0])
data_list = []
print('Processing SHREC')
shrec_path = osp.join(shapeseg_path, 'SHREC', 'raw')
extract_zip(osp.join(shrec_path, 'shrec.zip'), shrec_path)
shrec_meshes = osp.join(shrec_path, 'meshes')
shrec_meshes = osp.join(shrec_meshes, '{}.ply')
shrec_segs = osp.join(shrec_path, 'segs', '{}.pt')
for i in progressbar.progressbar(range(18)):
data = read_ply(shrec_meshes.format(i))
if ((self.pre_filter is not None) and (not self.pre_filter(data))):
continue
if (self.pre_transform is not None):
data = self.pre_transform(data)
data.y = torch.load(shrec_segs.format(i))
if hasattr(data, 'sample_idx'):
data.y = data.y[data.sample_idx]
data_list.append(data)
torch.save(self.collate(data_list), self.processed_paths[1])
shutil.rmtree(osp.join(self.raw_dir, 'ShapeSeg'))
|
class Shrec16(InMemoryDataset):
    """The SHREC classification dataset.

    This is the remeshed version from MeshCNN.

    .. note::

        Data objects hold mesh faces instead of edge indices.
        To convert the mesh to a graph, use the
        :obj:`torch_geometric.transforms.FaceToEdge` as :obj:`pre_transform`.
        To convert the mesh to a point cloud, use the
        :obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform` to
        sample a fixed number of points on the mesh faces according to their
        face area.

    Args:
        root (string): Root directory where the dataset should be saved.
        train (bool, optional): If :obj:`True`, loads the training dataset,
            otherwise the test dataset. (default: :obj:`True`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in the
            final dataset. (default: :obj:`None`)
    """
url = 'https://surfdrive.surf.nl/files/index.php/s/ifhelkX4cd7ky8W'
class_names = ['alien', 'ants', 'armadillo', 'bird1', 'bird2', 'camel', 'cat', 'centaur', 'dinosaur', 'dino_ske', 'dog1', 'dog2', 'flamingo', 'glasses', 'gorilla', 'hand', 'horse', 'lamp', 'laptop', 'man', 'myScissor', 'octopus', 'pliers', 'rabbit', 'santa', 'shark', 'snake', 'spiders', 'two_balls', 'woman']
def __init__(self, root, train=True, transform=None, pre_transform=None, pre_filter=None, split10=True):
self.split10 = split10
super(Shrec16, self).__init__(root, transform, pre_transform, pre_filter)
path = (self.processed_paths[0] if train else self.processed_paths[1])
(self.data, self.slices) = torch.load(path)
@property
def raw_file_names(self):
return ['shrec_16.zip']
@property
def processed_file_names(self):
return ['training.pt', 'test.pt']
@property
def num_classes(self):
return len(self.class_names)
def download(self):
raise RuntimeError('Dataset not found. Please download {} from {} and move it to {}'.format(self.raw_file_names, self.url, self.raw_dir))
def process(self):
print('Extracting zip...')
extract_zip(self.raw_paths[0], self.raw_dir, log=False)
training_list = []
test_list = []
print('Processing Shrec...')
raw_path = osp.join(self.raw_dir, 'shrec_16')
for (class_idx, class_name) in enumerate(self.class_names):
train_meshes = osp.join(raw_path, class_name, 'train')
mesh_files = osls(train_meshes)
idx = (np.random.permutation(len(mesh_files))[:10] if self.split10 else np.arange(len(mesh_files)))
for (file_i, filename) in progressbar.progressbar(enumerate(mesh_files)):
if (file_i not in idx):
continue
data = read_obj(osp.join(train_meshes, filename))
data.y = class_idx
if ((self.pre_filter is not None) and (not self.pre_filter(data))):
continue
if (self.pre_transform is not None):
data = self.pre_transform(data)
training_list.append(data)
test_meshes = osp.join(raw_path, class_name, 'test')
for filename in progressbar.progressbar(osls(test_meshes)):
data = read_obj(osp.join(test_meshes, filename))
data.y = class_idx
if ((self.pre_filter is not None) and (not self.pre_filter(data))):
continue
if (self.pre_transform is not None):
data = self.pre_transform(data)
test_list.append(data)
torch.save(self.collate(training_list), self.processed_paths[0])
torch.save(self.collate(test_list), self.processed_paths[1])
shutil.rmtree(osp.join(self.raw_dir, 'shrec_16'))
|
class ComplexLin(nn.Module):
    """A linear layer applied to complex feature vectors
    The result is a linear combination of the complex input features

    Args:
        in_channels (int): number of input features
        out_channels (int): number of output features
    """
def __init__(self, in_channels, out_channels):
super(ComplexLin, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.lin = nn.Linear(in_channels, out_channels)
def forward(self, x):
x = x.permute(0, 1, 3, 2)
sh = x.size()
x = self.lin(x.reshape((- 1), self.in_channels))
return x.reshape(sh[0], sh[1], sh[2], self.out_channels).permute(0, 1, 3, 2)
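# Added shape check (plain torch): ComplexLin mixes the channel axis while
# leaving the trailing complex axis (real, imag) untouched. The input layout
# (n_nodes, n_orders, in_channels, 2) is assumed from the forward pass above.
if __name__ == '__main__':
    import torch
    _x = torch.randn(10, 2, 8, 2)
    assert ComplexLin(8, 16)(_x).shape == (10, 2, 16, 2)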
|
class ComplexNonLin(nn.Module):
    """Adds a learned bias and applies the nonlinearity
    given by fnc to the radial component of complex features

    Args:
        in_channels (int): number of input features
        fnc (torch.nn.Module): non-linearity function
    """
def __init__(self, in_channels, fnc=F.relu):
super(ComplexNonLin, self).__init__()
self.fnc = fnc
self.bias = nn.Parameter(torch.Tensor(in_channels))
zeros(self.bias)
def forward(self, x):
magnitude = magnitudes(x)
rb = (magnitude + self.bias.unsqueeze((- 1)))
c = torch.div(self.fnc(rb), magnitude)
return (c * x)
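# Added note: this is essentially a modReLU. The learned bias is added to the
# radial component and the feature is rescaled by fnc(|x| + b) / |x|, which
# preserves the phase of each complex feature. `magnitudes` is assumed to
# return |x| with the complex axis kept, so that it broadcasts against x.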
|
class HarmonicResNetBlock(torch.nn.Module):
    """
    ResNet block with convolutions, linearities, and non-linearities
    as described in Harmonic Surface Networks

    Args:
        in_channels (int): number of input features
        out_channels (int): number of output features
        prev_order (int, optional): the maximum rotation order of the previous layer,
            should be set to 0 if this is the first layer (default: :obj:`1`)
        max_order (int, optional): the maximum rotation order of this convolution
            will convolve with every rotation order up to and including `max_order`,
            (default: :obj:`1`)
        n_rings (int, optional): the number of rings in the radial profile (default: :obj:`2`)
        offset (bool, optional): if set to :obj:`False`, does not learn an offset parameter,
            this is practical in the last layer of a network (default: :obj:`True`)
        separate_streams (bool, optional): if set to :obj:`True`, learns a radial profile
            for each convolution connecting streams, instead of only an m=0 and m=1 convolution
            (default: :obj:`True`)
        last_layer (bool, optional): if set to :obj:`True`, does not learn a phase offset
            for the last harmonic conv. (default :obj:`False`)
    """
def __init__(self, in_channels, out_channels, max_order=1, n_rings=2, prev_order=1, offset=True, separate_streams=True, last_layer=False):
super(HarmonicResNetBlock, self).__init__()
self.prev_order = prev_order
self.conv1 = HarmonicConv(in_channels, out_channels, max_order, n_rings, prev_order, offset, separate_streams)
self.nonlin1 = ComplexNonLin(out_channels, F.relu)
self.conv2 = HarmonicConv(out_channels, out_channels, max_order, n_rings, max_order, (offset and (not last_layer)), separate_streams)
self.project_residuals = (in_channels != out_channels)
if self.project_residuals:
self.lin = ComplexLin(in_channels, out_channels)
self.nonlin2 = ComplexNonLin(out_channels, F.relu)
def forward(self, x, edge_index, precomp, connection=None):
if (self.prev_order == 0):
x_conv = self.nonlin1(self.conv1(x, edge_index, precomp))
else:
x_conv = self.nonlin1(self.conv1(x, edge_index, precomp, connection))
x_conv = self.conv2(x_conv, edge_index, precomp, connection)
x = (self.lin(x) if self.project_residuals else x)
x_conv = (x_conv + x)
return self.nonlin2(x_conv)
|
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = HarmonicConv(1, nf[0], max_order, n_rings, prev_order=0)
self.nonlin1 = ComplexNonLin(nf[0])
self.conv2 = HarmonicConv(nf[0], nf[0], max_order, n_rings)
self.bn1 = nn.BatchNorm1d(((max_order + 1) * nf[0]), eps=0.001, momentum=0.01)
self.pool1 = ParallelTransportPool(1, scale1_transform)
self.conv3 = HarmonicConv(nf[0], nf[1], max_order, n_rings)
self.nonlin3 = ComplexNonLin(nf[1])
self.conv4 = HarmonicConv(nf[1], nf[1], max_order, n_rings)
self.bn2 = nn.BatchNorm1d(((max_order + 1) * nf[1]), eps=0.001, momentum=0.01)
self.pool2 = ParallelTransportPool(2, scale2_transform)
self.conv5 = HarmonicConv(nf[1], nf[2], max_order, n_rings)
self.nonlin5 = ComplexNonLin(nf[2])
self.conv6 = HarmonicConv(nf[2], nf[2], max_order, n_rings)
self.bn3 = nn.BatchNorm1d(((max_order + 1) * nf[2]), eps=0.001, momentum=0.01)
self.conv7 = HarmonicConv(nf[2], 10, max_order, n_rings, offset=False)
self.bias = nn.Parameter(torch.Tensor(10))
zeros(self.bias)
def forward(self, data):
x = torch.stack((data.x, torch.zeros_like(data.x)), dim=(- 1)).unsqueeze(1)
batch_size = data.num_graphs
n_nodes = x.size(0)
data_scale0 = scale0_transform(data)
attributes = (data_scale0.edge_index, data_scale0.precomp, data_scale0.connection)
x = self.conv1(x, attributes[0], attributes[1])
x = self.nonlin1(x)
x = self.conv2(x, *attributes)
x = c_batch_norm(x, batch_size, self.bn1, F.relu)
(x, data, data_pooled) = self.pool1(x, data)
attributes_pooled = (data_pooled.edge_index, data_pooled.precomp, data_pooled.connection)
x = self.conv3(x, *attributes_pooled)
x = self.nonlin3(x)
x = self.conv4(x, *attributes_pooled)
x = c_batch_norm(x, batch_size, self.bn2, F.relu)
(x, data, data_pooled) = self.pool2(x, data)
attributes_pooled = (data_pooled.edge_index, data_pooled.precomp, data_pooled.connection)
x = self.conv5(x, *attributes_pooled)
x = self.nonlin5(x)
x = self.conv6(x, *attributes_pooled)
x = c_batch_norm(x, batch_size, self.bn3, F.relu)
x = self.conv7(x, *attributes_pooled)
x = magnitudes(x, keepdim=False)
x = x.sum(dim=1)
x = global_mean_pool(x, data.batch)
x = (x + self.bias)
return F.log_softmax(x, dim=1)
|
def train(epoch):
model.train()
for param_group in optimizer.param_groups:
param_group['lr'] = (param_group['lr'] * np.power(0.1, (epoch / 50)))
for data in train_loader:
optimizer.zero_grad()
F.nll_loss(model(data.to(device)), data.y).backward()
optimizer.step()
|
def test():
model.eval()
correct = 0
for data in progressbar.progressbar(test_loader):
data = data.to(device)
pred = model(data).max(1)[1]
correct += pred.eq(data.y).sum().item()
return (correct / len(test_dataset))
|
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.lin0 = nn.Linear(3, nf[0])
self.resnet_block11 = HarmonicResNetBlock(nf[0], nf[0], max_order, n_rings, prev_order=0)
self.pool = ParallelTransportPool(1, scale1_transform)
self.resnet_block21 = HarmonicResNetBlock(nf[0], nf[1], max_order, n_rings)
self.conv_final = HarmonicConv(nf[1], n_classes, max_order, n_rings, offset=False)
self.bias = nn.Parameter(torch.Tensor(n_classes))
zeros(self.bias)
def forward(self, data):
x = data.pos
x = F.relu(self.lin0(x))
x = torch.stack((x, torch.zeros_like(x)), dim=(- 1)).unsqueeze(1)
data_scale0 = scale0_transform(data)
attributes = (data_scale0.edge_index, data_scale0.precomp, data_scale0.connection)
x = self.resnet_block11(x, *attributes)
(x, data, data_pooled) = self.pool(x, data)
attributes_pooled = (data_pooled.edge_index, data_pooled.precomp, data_pooled.connection)
x = self.resnet_block21(x, *attributes_pooled)
x = self.conv_final(x, *attributes_pooled)
x = magnitudes(x, keepdim=False)
x = x.sum(dim=1)
x = torch.mean(x, dim=0, keepdim=True)
x = (x + self.bias)
return F.log_softmax(x, dim=1)
|
def train(epoch):
model.train()
if (epoch > 30):
for param_group in optimizer.param_groups:
param_group['lr'] = 0.001
for data in train_loader:
optimizer.zero_grad()
F.nll_loss(model(data.to(device)), data.y).backward()
optimizer.step()
|
def test():
model.eval()
correct = 0
total_num = 0
for (i, data) in enumerate(test_loader):
pred = model(data.to(device)).max(1)[1]
correct += pred.eq(data.y).sum().item()
        total_num += data.y.size(0)  # count samples rather than batches, so the denominator matches the per-sample correct count
return (correct / total_num)
|
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.lin0 = nn.Linear(3, nf[0])
self.resnet_block11 = HarmonicResNetBlock(nf[0], nf[0], max_order, n_rings, prev_order=0)
self.resnet_block12 = HarmonicResNetBlock(nf[0], nf[0], max_order, n_rings)
self.pool = ParallelTransportPool(1, scale1_transform)
self.resnet_block21 = HarmonicResNetBlock(nf[0], nf[1], max_order, n_rings)
self.resnet_block22 = HarmonicResNetBlock(nf[1], nf[1], max_order, n_rings)
self.resnet_block31 = HarmonicResNetBlock(nf[1], nf[1], max_order, n_rings)
self.resnet_block32 = HarmonicResNetBlock(nf[1], nf[1], max_order, n_rings)
self.unpool = ParallelTransportUnpool(from_lvl=1)
self.resnet_block41 = HarmonicResNetBlock((nf[1] + nf[0]), nf[0], max_order, n_rings)
self.resnet_block42 = HarmonicResNetBlock(nf[0], nf[0], max_order, n_rings)
self.conv_final = HarmonicConv(nf[0], n_classes, max_order, n_rings, offset=False)
self.bias = nn.Parameter(torch.Tensor(n_classes))
zeros(self.bias)
def forward(self, data):
x = data.pos
x = F.relu(self.lin0(x))
x = torch.stack((x, torch.zeros_like(x)), dim=(- 1)).unsqueeze(1)
data_scale0 = scale0_transform(data)
attributes = (data_scale0.edge_index, data_scale0.precomp, data_scale0.connection)
x = self.resnet_block11(x, *attributes)
x_prepool = self.resnet_block12(x, *attributes)
(x, data, data_pooled) = self.pool(x_prepool, data)
attributes_pooled = (data_pooled.edge_index, data_pooled.precomp, data_pooled.connection)
x = self.resnet_block21(x, *attributes_pooled)
x = self.resnet_block22(x, *attributes_pooled)
x = self.resnet_block31(x, *attributes_pooled)
x = self.resnet_block32(x, *attributes_pooled)
x = self.unpool(x, data)
x = torch.cat((x, x_prepool), dim=2)
x = self.resnet_block41(x, *attributes)
x = self.resnet_block42(x, *attributes)
x = self.conv_final(x, *attributes)
x = magnitudes(x, keepdim=False)
x = x.sum(dim=1)
x = (x + self.bias)
return F.log_softmax(x, dim=1)
|
def train(epoch):
model.train()
if (epoch > 20):
for param_group in optimizer.param_groups:
param_group['lr'] = 0.001
for data in progressbar.progressbar(train_loader):
optimizer.zero_grad()
F.nll_loss(model(data.to(device)), data.y).backward()
optimizer.step()
|
def test():
model.eval()
correct = 0
total_num = 0
for (i, data) in enumerate(test_loader):
pred = model(data.to(device)).max(1)[1]
correct += pred.eq(data.y).sum().item()
total_num += data.y.size(0)
return (correct / total_num)
|
class MultiscaleRadiusGraph(object):
    """Creates a radius graph for multiple pooling levels.
    The nodes and adjacency matrix for each pooling level can be accessed by masking
    tensors with values for nodes and edges with data.node_mask and data.edge_mask, respectively.

    Edges can belong to multiple levels,
    therefore we store the membership of an edge for a certain level with a bitmask:
    - The bit at position 2 * n corresponds to the edges used for pooling to level n
    - The bit at position 2 * n + 1 corresponds to the edges used for convolution in level n

    To find out if an edge belongs to a level, use a bitwise AND:
    `edge_mask & (0b1 << lvl) > 0`

    Args:
        ratios (list): the ratios for downsampling at each pooling layer.
        radii (list): the radius of the kernel support for each scale.
        max_neighbours (int, optional): the maximum number of neighbors per vertex,
            important to set higher than the expected number of neighbors.
        sample_n (int, optional): if provided, constructs a graph for only sample_n vertices.
        loop (bool, optional): If :obj:`True`, the graph will contain
            self-loops. (default: :obj:`False`)
        flow (string, optional): The flow direction when using in combination
            with message passing (:obj:`"source_to_target"` or
            :obj:`"target_to_source"`). (default: :obj:`"source_to_target"`)
        cache_file (string, optional): if set, cache the precomputation
            in the given file and reuse for every following shape.
    """
def __init__(self, ratios, radii, max_neighbours=512, sample_n=None, loop=False, flow='source_to_target', cache_file=None):
assert (len(ratios) == len(radii))
self.ratios = ratios
self.radii = radii
self.max_neighbours = max_neighbours
self.sample_n = sample_n
self.loop = loop
self.flow = flow
self.get_cache = ((cache_file is not None) and osp.exists(cache_file))
self.save_cache = ((cache_file is not None) and (not self.get_cache))
self.cache_file = cache_file
if self.get_cache:
(self.edge_index, self.node_mask, self.edge_mask) = torch.load(cache_file)
def __call__(self, data):
if self.get_cache:
(data.edge_index, data.node_mask, data.edge_mask) = (self.edge_index, self.node_mask, self.edge_mask)
return data
data.edge_attr = None
batch = (data.batch if ('batch' in data) else None)
pos = data.pos
edge_index = torch.LongTensor()
edge_mask = torch.LongTensor()
node_mask = torch.zeros(data.num_nodes)
        if self.sample_n is not None and self.sample_n <= data.pos.size(0):
sample_idx = fps(pos, batch, ratio=(self.sample_n / data.pos.size(0))).sort()[0]
else:
sample_idx = torch.arange(data.num_nodes)
data.sample_idx = sample_idx
original_idx = torch.arange(sample_idx.size(0))
pos = pos[sample_idx]
batch = (batch[sample_idx] if (batch is not None) else None)
for (i, r) in enumerate(self.ratios):
if (r == 1):
pool_idx = original_idx
else:
pool_idx = fps(pos, batch, r).sort()[0]
(pos_vh, face_vh) = (data.pos.cpu().numpy(), data.face.cpu().numpy().T)
(idx_vh, labels_vh) = (sample_idx[pool_idx].cpu().numpy(), np.arange(pool_idx.size(0)))
pool_neigh = torch.from_numpy(vh.nearest(pos_vh, face_vh, idx_vh, labels_vh)).round().long().clamp(0, (pool_idx.size(0) - 1)).view((- 1))
edge_index = torch.cat((edge_index, torch.stack((original_idx[pool_idx][pool_neigh][sample_idx], original_idx), dim=0)), dim=1)
edge_mask = torch.cat((edge_mask, (torch.ones(sample_idx.size(0)) * (1 << (i * 2))).long()))
(sample_idx, original_idx, pos, batch) = (sample_idx[pool_idx], original_idx[pool_idx], pos[pool_idx], (batch[pool_idx] if (batch is not None) else None))
node_mask[sample_idx] = i
radius_edges = radius(pos, pos, self.radii[i], batch, batch, self.max_neighbours)
radius_edges = original_idx[radius_edges]
edge_index = torch.cat((edge_index, radius_edges), dim=1)
edge_mask = torch.cat((edge_mask, (torch.ones(radius_edges.size(1)) * (1 << ((i * 2) + 1))).long()))
(edge_index, edge_mask) = coalesce(edge_index, edge_mask, data.num_nodes, data.num_nodes, 'add')
data.edge_index = edge_index
data.node_mask = node_mask
data.edge_mask = edge_mask
if self.save_cache:
with open(self.cache_file, 'wb') as f:
torch.save((edge_index, node_mask, edge_mask), f)
self.save_cache = False
self.get_cache = True
(self.edge_index, self.node_mask, self.edge_mask) = (edge_index, node_mask, edge_mask)
return data
def __repr__(self):
return '{}(radii={}, ratios={}, sample_n={})'.format(self.__class__.__name__, self.radii, self.ratios, self.sample_n)
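# Added usage sketch for the bitmask convention documented in the class
# docstring (`lvl` is an illustrative level index; node_mask stores, per
# vertex, the coarsest level it survives to):
# conv_edges = data.edge_index[:, (data.edge_mask & (0b1 << (2 * lvl + 1))) > 0]
# pool_edges = data.edge_index[:, (data.edge_mask & (0b1 << (2 * lvl))) > 0]
# lvl_nodes = (data.node_mask >= lvl).nonzero(as_tuple=False).view(-1)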
|
class NormalizeArea(object):
    """Centers shapes and normalizes their surface area."""
def __init__(self):
return
def __call__(self, data):
data.pos = (data.pos - ((torch.max(data.pos, dim=0)[0] + torch.min(data.pos, dim=0)[0]) / 2))
(pos_vh, face_vh) = (data.pos.cpu().numpy(), data.face.cpu().numpy().T)
        scale = 1 / np.sqrt(vh.surface_area(pos_vh, face_vh))  # uniform scale factor so the total surface area becomes 1
        data.pos = data.pos * scale
return data
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
|
class Subsample(object):
    """Samples only the positions and descriptors that are set in data.sample_idx."""
def __init__(self):
return
def __call__(self, data):
assert hasattr(data, 'sample_idx')
sample_idx = data.sample_idx
data.pos = data.pos[sample_idx]
if hasattr(data, 'node_mask'):
data.node_mask = data.node_mask[sample_idx]
if hasattr(data, 'desc'):
data.desc = data.desc[sample_idx]
return data
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
|
def write_ply(file, data, pred, features):
    """
    Creates a ply file with the mesh,
    given predictions on each vertex and features from inside the network
    Can be used to visualize features in other software

    :param file: file name to write to
    :param data: mesh object with positions and faces in `pos` and `face`
    :param pred: predictions for each vertex, size [n_vert]
    :param features: complex features for each vertex, size [n_vert, 2]
    """
with open(file, 'w') as f:
        header = ('ply\n'
                  'format ascii 1.0\n'
                  'element vertex {}\n'
                  'property float x\n'
                  'property float y\n'
                  'property float z\n'
                  'property float fx\n'
                  'property float fy\n'
                  'property float seg\n'
                  'element face {}\n'
                  'property list uchar uint vertex_indices\n'
                  'end_header\n').format(data.pos.size(0), data.face.size(1))
        f.write(header)
for i in range(data.pos.size(0)):
f.write('{:1.6f} {:1.6f} {:1.6f} {:1.6f} {:1.6f} {}\n'.format(data.pos[(i, 0)], data.pos[(i, 1)], data.pos[(i, 2)], features[(i, 0)], features[(i, 1)], pred[i]))
for face in data.face.transpose(0, 1):
f.write('3 {} {} {}\n'.format(face[0], face[1], face[2]))
return
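# Added minimal example (hypothetical tensors; any object exposing `pos` of
# size (V, 3) and `face` of size (3, F) works, e.g. a torch_geometric Data):
# from types import SimpleNamespace
# _d = SimpleNamespace(pos=torch.rand(4, 3), face=torch.tensor([[0, 1], [1, 2], [2, 3]]))
# write_ply('debug.ply', _d, pred=torch.zeros(4, dtype=torch.long), features=torch.zeros(4, 2))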
|
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
|
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(('CMake must be installed to build the following extensions: ' + ', '.join((e.name for e in self.extensions))))
if (platform.system() == 'Windows'):
cmake_version = LooseVersion(re.search('version\\s*([\\d.]+)', out.decode()).group(1))
if (cmake_version < '3.1.0'):
raise RuntimeError('CMake >= 3.1.0 is required on Windows')
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = [('-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir), ('-DPYTHON_EXECUTABLE=' + sys.executable)]
cfg = ('Debug' if self.debug else 'Release')
build_args = ['--config', cfg]
if (platform.system() == 'Windows'):
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if (sys.maxsize > (2 ** 32)):
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += [('-DCMAKE_BUILD_TYPE=' + cfg)]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''), self.distribution.get_version())
if (not os.path.exists(self.build_temp)):
os.makedirs(self.build_temp)
subprocess.check_call((['cmake', ext.sourcedir] + cmake_args), cwd=self.build_temp, env=env)
subprocess.check_call((['cmake', '--build', '.'] + build_args), cwd=self.build_temp)
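# Added usage sketch: wiring the two helpers above into setuptools (the package
# name is illustrative, not from the source).
# from setuptools import setup
# setup(
#     name='my_ext',
#     ext_modules=[CMakeExtension('my_ext', sourcedir='.')],
#     cmdclass={'build_ext': CMakeBuild},
#     zip_safe=False,
# )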
|
def evaluate(j, e, solver, scores1, scores2, data_loader, logdir, reference_point, split, result_dict):
    """
    Do one forward pass through the dataloader and log the scores.
    """
assert (split in ['train', 'val', 'test'])
mode = 'pf'
if (mode == 'pf'):
        assert len(scores1) == len(scores2) <= 3, 'Cannot generate circle points for more than 3 dimensions.'
n_test_rays = 25
test_rays = utils.circle_points(n_test_rays, dim=len(scores1))
elif (mode == 'mcr'):
test_rays = np.ones((1, len(scores1)))
test_rays /= test_rays.sum(axis=1).reshape(1, 1)
else:
raise ValueError()
print(test_rays[0])
score_values1 = np.array([])
score_values2 = np.array([])
for (k, batch) in enumerate(data_loader):
print(f'eval batch {(k + 1)} of {len(data_loader)}')
batch = utils.dict_to_cuda(batch)
s1 = []
s2 = []
for l in solver.eval_step(batch, test_rays):
batch.update(l)
s1.append([s(**batch) for s in scores1])
s2.append([s(**batch) for s in scores2])
if (score_values1.size == 0):
score_values1 = np.array(s1)
score_values2 = np.array(s2)
else:
score_values1 += np.array(s1)
score_values2 += np.array(s2)
score_values1 /= len(data_loader)
score_values2 /= len(data_loader)
hv = HyperVolume(reference_point)
if (mode == 'pf'):
pareto_front = utils.ParetoFront([s.__class__.__name__ for s in scores1], logdir, '{}_{:03d}'.format(split, e))
pareto_front.append(score_values1)
pareto_front.plot()
volume = hv.compute(score_values1)
else:
volume = (- 1)
result = {'scores_loss': score_values1.tolist(), 'scores_mcr': score_values2.tolist(), 'hv': volume, 'task': j, 'max_epoch_so_far': (- 1), 'max_volume_so_far': (- 1), 'training_time_so_far': (- 1)}
result.update(solver.log())
result_dict[f'start_{j}'][f'epoch_{e}'] = result
with open((pathlib.Path(logdir) / f'{split}_results.json'), 'w') as file:
json.dump(result_dict, file)
return result_dict
|
def eval(settings):
'\n The full evaluation loop. Generate scores for all checkpoints found in the directory specified above.\n\n Uses the same ArgumentParser as main.py to determine the method and dataset.\n '
settings['batch_size'] = 2048
print('start evaluation with settings', settings)
logdir = os.path.join(settings['logdir'], settings['method'], settings['dataset'], utils.get_runname(settings))
pathlib.Path(logdir).mkdir(parents=True, exist_ok=True)
train_set = utils.dataset_from_name(split='train', **settings)
val_set = utils.dataset_from_name(split='val', **settings)
test_set = utils.dataset_from_name(split='test', **settings)
train_loader = data.DataLoader(train_set, settings['batch_size'], shuffle=True, num_workers=settings['num_workers'])
val_loader = data.DataLoader(val_set, settings['batch_size'], shuffle=True, num_workers=settings['num_workers'])
    test_loader = data.DataLoader(test_set, settings['batch_size'], num_workers=settings['num_workers'])
objectives = from_name(settings.pop('objectives'), val_set.task_names())
scores1 = from_objectives(objectives)
scores2 = [mcr(o.label_name, o.logits_name) for o in objectives]
solver = solver_from_name(objectives=objectives, **settings)
train_results = dict(settings=settings, num_parameters=utils.num_parameters(solver.model_params()))
val_results = dict(settings=settings, num_parameters=utils.num_parameters(solver.model_params()))
test_results = dict(settings=settings, num_parameters=utils.num_parameters(solver.model_params()))
task_ids = (settings['task_ids'] if (settings['method'] == 'SingleTask') else [0])
for j in task_ids:
if (settings['method'] == 'SingleTask'):
checkpoints = pathlib.Path(CHECKPOINT_DIR).glob(f'**/*_{j:03d}/*/c_*.pth')
else:
checkpoints = pathlib.Path(CHECKPOINT_DIR).glob('**/c_*.pth')
train_results[f'start_{j}'] = {}
val_results[f'start_{j}'] = {}
test_results[f'start_{j}'] = {}
for c in sorted(checkpoints):
print('checkpoint', c)
(_, e) = c.stem.replace('c_', '').split('-')
j = int(j)
e = int(e)
solver.model.load_state_dict(torch.load(c))
val_results = evaluate(j, e, solver, scores1, scores2, val_loader, logdir, reference_point=settings['reference_point'], split='val', result_dict=val_results)
test_results = evaluate(j, e, solver, scores1, scores2, test_loader, logdir, reference_point=settings['reference_point'], split='test', result_dict=test_results)
|
class HyperVolume():
'\n Hypervolume computation based on variant 3 of the algorithm in the paper:\n C. M. Fonseca, L. Paquete, and M. Lopez-Ibanez. An improved dimension-sweep\n algorithm for the hypervolume indicator. In IEEE Congress on Evolutionary\n Computation, pages 1157-1163, Vancouver, Canada, July 2006.\n\n Minimization is implicitly assumed here!\n\n '
def __init__(self, referencePoint):
'Constructor.'
self.referencePoint = referencePoint
self.list = []
def compute(self, front):
'Returns the hypervolume that is dominated by a non-dominated front.\n\n Before the HV computation, front and reference point are translated, so\n that the reference point is [0, ..., 0].\n\n '
def weaklyDominates(point, other):
for i in range(len(point)):
if (point[i] > other[i]):
return False
return True
relevantPoints = []
referencePoint = self.referencePoint
dimensions = len(referencePoint)
for point in front:
if weaklyDominates(point, referencePoint):
relevantPoints.append(point)
if any(referencePoint):
for j in range(len(relevantPoints)):
relevantPoints[j] = [(relevantPoints[j][i] - referencePoint[i]) for i in range(dimensions)]
self.preProcess(relevantPoints)
bounds = ([(- 1e+308)] * dimensions)
hyperVolume = self.hvRecursive((dimensions - 1), len(relevantPoints), bounds)
return hyperVolume
def hvRecursive(self, dimIndex, length, bounds):
'Recursive call to hypervolume calculation.\n\n In contrast to the paper, the code assumes that the reference point\n is [0, ..., 0]. This allows the avoidance of a few operations.\n\n '
hvol = 0.0
sentinel = self.list.sentinel
if (length == 0):
return hvol
elif (dimIndex == 0):
return (- sentinel.next[0].cargo[0])
elif (dimIndex == 1):
q = sentinel.next[1]
h = q.cargo[0]
p = q.next[1]
while (p is not sentinel):
pCargo = p.cargo
hvol += (h * (q.cargo[1] - pCargo[1]))
if (pCargo[0] < h):
h = pCargo[0]
q = p
p = q.next[1]
hvol += (h * q.cargo[1])
return hvol
else:
remove = self.list.remove
reinsert = self.list.reinsert
hvRecursive = self.hvRecursive
p = sentinel
q = p.prev[dimIndex]
            while (q.cargo is not None):
if (q.ignore < dimIndex):
q.ignore = 0
q = q.prev[dimIndex]
q = p.prev[dimIndex]
while ((length > 1) and ((q.cargo[dimIndex] > bounds[dimIndex]) or (q.prev[dimIndex].cargo[dimIndex] >= bounds[dimIndex]))):
p = q
remove(p, dimIndex, bounds)
q = p.prev[dimIndex]
length -= 1
qArea = q.area
qCargo = q.cargo
qPrevDimIndex = q.prev[dimIndex]
if (length > 1):
hvol = (qPrevDimIndex.volume[dimIndex] + (qPrevDimIndex.area[dimIndex] * (qCargo[dimIndex] - qPrevDimIndex.cargo[dimIndex])))
else:
qArea[0] = 1
qArea[1:(dimIndex + 1)] = [(qArea[i] * (- qCargo[i])) for i in range(dimIndex)]
q.volume[dimIndex] = hvol
if (q.ignore >= dimIndex):
qArea[dimIndex] = qPrevDimIndex.area[dimIndex]
else:
qArea[dimIndex] = hvRecursive((dimIndex - 1), length, bounds)
if (qArea[dimIndex] <= qPrevDimIndex.area[dimIndex]):
q.ignore = dimIndex
while (p is not sentinel):
pCargoDimIndex = p.cargo[dimIndex]
hvol += (q.area[dimIndex] * (pCargoDimIndex - q.cargo[dimIndex]))
bounds[dimIndex] = pCargoDimIndex
reinsert(p, dimIndex, bounds)
length += 1
q = p
p = p.next[dimIndex]
q.volume[dimIndex] = hvol
if (q.ignore >= dimIndex):
q.area[dimIndex] = q.prev[dimIndex].area[dimIndex]
else:
q.area[dimIndex] = hvRecursive((dimIndex - 1), length, bounds)
if (q.area[dimIndex] <= q.prev[dimIndex].area[dimIndex]):
q.ignore = dimIndex
hvol -= (q.area[dimIndex] * q.cargo[dimIndex])
return hvol
def preProcess(self, front):
'Sets up the list data structure needed for calculation.'
dimensions = len(self.referencePoint)
nodeList = MultiList(dimensions)
nodes = [MultiList.Node(dimensions, point) for point in front]
for i in range(dimensions):
self.sortByDimension(nodes, i)
nodeList.extend(nodes, i)
self.list = nodeList
def sortByDimension(self, nodes, i):
'Sorts the list of nodes by the i-th value of the contained points.'
decorated = [(node.cargo[i], node) for node in nodes]
decorated.sort(key=(lambda x: x[0]))
nodes[:] = [node for (_, node) in decorated]
|
class MultiList():
'A special data structure needed by FonsecaHyperVolume. \n \n It consists of several doubly linked lists that share common nodes. So, \n every node has multiple predecessors and successors, one in every list.\n\n '
class Node():
def __init__(self, numberLists, cargo=None):
self.cargo = cargo
self.next = ([None] * numberLists)
self.prev = ([None] * numberLists)
self.ignore = 0
self.area = ([0.0] * numberLists)
self.volume = ([0.0] * numberLists)
def __str__(self):
return str(self.cargo)
def __init__(self, numberLists):
"Constructor. \n \n Builds 'numberLists' doubly linked lists.\n\n "
self.numberLists = numberLists
self.sentinel = MultiList.Node(numberLists)
self.sentinel.next = ([self.sentinel] * numberLists)
self.sentinel.prev = ([self.sentinel] * numberLists)
def __str__(self):
strings = []
for i in range(self.numberLists):
currentList = []
node = self.sentinel.next[i]
while (node != self.sentinel):
currentList.append(str(node))
node = node.next[i]
strings.append(str(currentList))
stringRepr = ''
for string in strings:
stringRepr += (string + '\n')
return stringRepr
def __len__(self):
'Returns the number of lists that are included in this MultiList.'
return self.numberLists
def getLength(self, i):
'Returns the length of the i-th list.'
length = 0
sentinel = self.sentinel
node = sentinel.next[i]
while (node != sentinel):
length += 1
node = node.next[i]
return length
def append(self, node, index):
'Appends a node to the end of the list at the given index.'
lastButOne = self.sentinel.prev[index]
node.next[index] = self.sentinel
node.prev[index] = lastButOne
self.sentinel.prev[index] = node
lastButOne.next[index] = node
def extend(self, nodes, index):
'Extends the list at the given index with the nodes.'
sentinel = self.sentinel
for node in nodes:
lastButOne = sentinel.prev[index]
node.next[index] = sentinel
node.prev[index] = lastButOne
sentinel.prev[index] = node
lastButOne.next[index] = node
def remove(self, node, index, bounds):
"Removes and returns 'node' from all lists in [0, 'index'[."
for i in range(index):
predecessor = node.prev[i]
successor = node.next[i]
predecessor.next[i] = successor
successor.prev[i] = predecessor
if (bounds[i] > node.cargo[i]):
bounds[i] = node.cargo[i]
return node
def reinsert(self, node, index, bounds):
"\n Inserts 'node' at the position it had in all lists in [0, 'index'[\n before it was removed. This method assumes that the next and previous \n nodes of the node that is reinserted are in the list.\n\n "
for i in range(index):
node.prev[i].next[i] = node
node.next[i].prev[i] = node
if (bounds[i] > node.cargo[i]):
bounds[i] = node.cargo[i]
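
# A small worked example, assuming minimization with reference point (1, 1):
# the two points dominate rectangles of area 0.4 each that overlap in a 0.25
# square, so the hypervolume is 0.4 + 0.4 - 0.25 = 0.55. Internally,
# preProcess links every point into a MultiList with one doubly linked list
# per objective.
hv = HyperVolume(referencePoint=[1.0, 1.0])
print(hv.compute([[0.5, 0.2], [0.2, 0.5]]))        # 0.55
print(hv.list.getLength(0), hv.list.getLength(1))  # 2 2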
|
def load_dataset(path, s_label):
data = pd.read_csv(path)
data['workclass'] = data['workclass'].replace('?', 'Private')
data['occupation'] = data['occupation'].replace('?', 'Prof-specialty')
data['native-country'] = data['native-country'].replace('?', 'United-States')
data.education = data.education.replace(['Preschool', '1st-4th', '5th-6th', '7th-8th', '9th', '10th', '11th', '12th'], 'left')
data.education = data.education.replace('HS-grad', 'school')
data.education = data.education.replace(['Assoc-voc', 'Assoc-acdm', 'Prof-school', 'Some-college'], 'higher')
data.education = data.education.replace('Bachelors', 'undergrad')
data.education = data.education.replace('Masters', 'grad')
data.education = data.education.replace('Doctorate', 'doc')
data['marital-status'] = data['marital-status'].replace(['Married-civ-spouse', 'Married-AF-spouse'], 'married')
data['marital-status'] = data['marital-status'].replace(['Never-married', 'Divorced', 'Separated', 'Widowed', 'Married-spouse-absent'], 'not-married')
data.income = data.income.replace('<=50K', 0)
data.income = data.income.replace('>50K', 1)
data.gender = data.gender.replace('Male', 0)
data.gender = data.gender.replace('Female', 1)
data1 = data.copy()
data1 = pd.get_dummies(data1)
data1 = data1.drop(['income', s_label], axis=1)
X = StandardScaler().fit(data1).transform(data1)
y = data['income'].values
s = data[s_label].values
return (X, y, s)
|
class ADULT(data.Dataset):
def __init__(self, root='data/adult', split='train', sensible_attribute='gender', **kwargs):
assert (split in ['train', 'val', 'test'])
path = os.path.join(root, 'adult.csv')
(x, y, s) = load_dataset(path, sensible_attribute)
x = torch.from_numpy(x).float()
y = torch.from_numpy(y).long()
s = torch.from_numpy(s).long()
(x_train, x_test, y_train, y_test, s_train, s_test) = train_test_split(x, y, s, test_size=0.2, random_state=1)
(x_train, x_val, y_train, y_val, s_train, s_val) = train_test_split(x_train, y_train, s_train, test_size=0.125, random_state=1)
if (split == 'train'):
self.x = x_train
self.y = y_train
self.s = s_train
elif (split == 'val'):
self.x = x_val
self.y = y_val
self.s = s_val
elif (split == 'test'):
self.x = x_test
self.y = y_test
self.s = s_test
print('loaded {} instances for split {}. y positives={}, {} positives={}'.format(len(self.y), split, sum(self.y), sensible_attribute, sum(self.s)))
def __len__(self):
'__len__'
return len(self.x)
def __getitem__(self, index):
return dict(data=self.x[index], labels=self.y[index], sensible_attribute=self.s[index])
def task_names(self):
return None
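
# A minimal usage sketch (root path and batch size are assumptions, and
# adult.csv must be in place): each batch is a dict holding the features,
# the income label and the sensible attribute.
adult_train = ADULT(root='data/adult', split='train')
loader = data.DataLoader(adult_train, batch_size=256, shuffle=True)
batch = next(iter(loader))
print(batch['data'].shape, batch['labels'].shape, batch['sensible_attribute'].shape)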
|
def load_dataset(root, s_label):
raw_data = pd.read_csv(os.path.join(root, 'compas-scores-two-years.csv'))
data = raw_data[(((((raw_data['days_b_screening_arrest'] <= 30) & (raw_data['days_b_screening_arrest'] >= (- 30))) & (raw_data['is_recid'] != (- 1))) & (raw_data['c_charge_degree'] != 'O')) & (raw_data['score_text'] != 'N/A'))]
data = data[['age', 'c_charge_degree', 'race', 'age_cat', 'score_text', 'sex', 'priors_count', 'days_b_screening_arrest', 'decile_score', 'is_recid', 'two_year_recid', 'c_jail_in', 'c_jail_out']]
def date_from_str(s):
return datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
data['c_jail_in'] = data['c_jail_in'].apply(date_from_str)
data['c_jail_out'] = data['c_jail_out'].apply(date_from_str)
data['length_of_stay'] = (data['c_jail_out'] - data['c_jail_in'])
data['length_of_stay'] = data['length_of_stay'].astype('timedelta64[h]')
data = data.drop(['c_jail_in', 'c_jail_out'], axis=1)
data['sex'] = data['sex'].replace('Male', 0)
data['sex'] = data['sex'].replace('Female', 1)
data1 = data.copy()
data1 = data1.drop(['two_year_recid', 'sex'], axis=1)
data1 = pd.get_dummies(data1)
x = StandardScaler().fit(data1).transform(data1)
y = data['two_year_recid'].values
s = data['sex'].values
return (x, y, s)
|
class Compas(torch.utils.data.Dataset):
def __init__(self, split, root='data/compas', sensible_attribute='sex', **kwargs):
assert (split in ['train', 'val', 'test'])
(x, y, s) = load_dataset(root, sensible_attribute)
x = torch.from_numpy(x).float()
y = torch.from_numpy(y).long()
s = torch.from_numpy(s).long()
(x_train, x_test, y_train, y_test, s_train, s_test) = train_test_split(x, y, s, test_size=0.2, random_state=1)
(x_train, x_val, y_train, y_val, s_train, s_val) = train_test_split(x_train, y_train, s_train, test_size=0.125, random_state=1)
if (split == 'train'):
self.x = x_train
self.y = y_train
self.s = s_train
elif (split == 'val'):
self.x = x_val
self.y = y_val
self.s = s_val
elif (split == 'test'):
self.x = x_test
self.y = y_test
self.s = s_test
print('loaded {} instances for split {}. y positives={}, {} positives={}'.format(len(self.y), split, sum(self.y), sensible_attribute, sum(self.s)))
def __len__(self):
return len(self.y)
def __getitem__(self, index):
return dict(data=self.x[index], labels=self.y[index], sensible_attribute=self.s[index])
def task_names(self):
return None
|
def load_dataset(root, s_label):
data = pd.read_csv(os.path.join(root, 'UCI_Credit_Card.csv'))
to_categorical = ['EDUCATION', 'MARRIAGE', 'PAY_0', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6']
for column in to_categorical:
data[column] = data[column].astype('category')
data.SEX = data.SEX.replace(1, 0)
data.SEX = data.SEX.replace(2, 1)
data1 = data.copy()
data1 = data1.drop(['default.payment.next.month', s_label], axis=1)
data1 = pd.get_dummies(data1)
x = StandardScaler().fit(data1).transform(data1)
y = data['default.payment.next.month'].values
s = data[s_label].values
return (x, y, s)
|
class Credit(torch.utils.data.Dataset):
def __init__(self, split, root='data/credit', sensible_attribute='SEX', **kwargs):
assert (split in ['train', 'val', 'test'])
(x, y, s) = load_dataset(root, sensible_attribute)
x = torch.from_numpy(x).float()
y = torch.from_numpy(y).long()
s = torch.from_numpy(s).long()
(x_train, x_test, y_train, y_test, s_train, s_test) = train_test_split(x, y, s, test_size=0.2, random_state=1)
(x_train, x_val, y_train, y_val, s_train, s_val) = train_test_split(x_train, y_train, s_train, test_size=0.125, random_state=1)
if (split == 'train'):
self.x = x_train
self.y = y_train
self.s = s_train
elif (split == 'val'):
self.x = x_val
self.y = y_val
self.s = s_val
elif (split == 'test'):
self.x = x_test
self.y = y_test
self.s = s_test
print('loaded {} instances for split {}. y positives={}, {} positives={}'.format(len(self.y), split, sum(self.y), sensible_attribute, sum(self.s)))
def __len__(self):
return len(self.y)
def __getitem__(self, index):
return dict(data=self.x[index], labels=self.y[index], sensible_attribute=self.s[index])
def task_names(self):
return None
|
def method_from_name(method, **kwargs):
if (method == 'ParetoMTL'):
return ParetoMTLMethod(**kwargs)
elif ('cosmos' in method):
return COSMOSMethod(**kwargs)
elif (method == 'SingleTask'):
return SingleTaskMethod(**kwargs)
elif ('hyper' in method):
return HypernetMethod(**kwargs)
elif (method == 'mgda'):
return MGDAMethod(**kwargs)
elif (method == 'uniform'):
return UniformScalingMethod(**kwargs)
else:
        raise ValueError('Unknown method {}'.format(method))
|
def evaluate(j, e, method, scores, data_loader, logdir, reference_point, split, result_dict):
assert (split in ['train', 'val', 'test'])
global volume_max
global epoch_max
score_values = np.array([])
for batch in data_loader:
batch = utils.dict_to_cuda(batch)
        batch_scores = []
        for l in method.eval_step(batch):
            batch.update(l)
            batch_scores.append([s(**batch) for s in scores])
        if (score_values.size == 0):
            score_values = np.array(batch_scores)
        else:
            score_values += np.array(batch_scores)
score_values /= len(data_loader)
hv = HyperVolume(reference_point)
volume = (hv.compute(score_values) if (score_values.shape[1] < 5) else (- 1))
if (len(scores) == 2):
pareto_front = utils.ParetoFront([s.__class__.__name__ for s in scores], logdir, '{}_{:03d}'.format(split, e))
pareto_front.append(score_values)
pareto_front.plot()
result = {'scores': score_values.tolist(), 'hv': volume}
if (split == 'val'):
if (volume > volume_max):
volume_max = volume
epoch_max = e
result.update({'max_epoch_so_far': epoch_max, 'max_volume_so_far': volume_max, 'training_time_so_far': elapsed_time})
elif (split == 'test'):
result.update({'training_time_so_far': elapsed_time})
result.update(method.log())
if (f'epoch_{e}' in result_dict[f'start_{j}']):
result_dict[f'start_{j}'][f'epoch_{e}'].update(result)
else:
result_dict[f'start_{j}'][f'epoch_{e}'] = result
with open((pathlib.Path(logdir) / f'{split}_results.json'), 'w') as file:
json.dump(result_dict, file)
return result_dict
|
def main(settings):
    print('start processing with settings', settings)
utils.set_seed(settings['seed'])
global elapsed_time
logdir = os.path.join(settings['logdir'], settings['method'], settings['dataset'], utils.get_runname(settings))
pathlib.Path(logdir).mkdir(parents=True, exist_ok=True)
train_set = utils.dataset_from_name(split='train', **settings)
val_set = utils.dataset_from_name(split='val', **settings)
test_set = utils.dataset_from_name(split='test', **settings)
train_loader = data.DataLoader(train_set, settings['batch_size'], shuffle=True, num_workers=settings['num_workers'])
val_loader = data.DataLoader(val_set, settings['batch_size'], shuffle=True, num_workers=settings['num_workers'])
    test_loader = data.DataLoader(test_set, settings['batch_size'], num_workers=settings['num_workers'])
objectives = from_name(settings.pop('objectives'), train_set.task_names())
scores = from_objectives(objectives)
rm1 = utils.RunningMean(400)
rm2 = utils.RunningMean(400)
method = method_from_name(objectives=objectives, **settings)
train_results = dict(settings=settings, num_parameters=utils.num_parameters(method.model_params()))
val_results = dict(settings=settings, num_parameters=utils.num_parameters(method.model_params()))
test_results = dict(settings=settings, num_parameters=utils.num_parameters(method.model_params()))
with open((pathlib.Path(logdir) / 'settings.json'), 'w') as file:
json.dump(train_results, file)
for j in range(settings['num_starts']):
train_results[f'start_{j}'] = {}
val_results[f'start_{j}'] = {}
test_results[f'start_{j}'] = {}
optimizer = torch.optim.Adam(method.model_params(), settings['lr'])
if settings['use_scheduler']:
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, settings['scheduler_milestones'], gamma=settings['scheduler_gamma'])
for e in range(settings['epochs']):
print(f'Epoch {e}')
tick = time.time()
method.new_epoch(e)
for (b, batch) in enumerate(train_loader):
batch = utils.dict_to_cuda(batch)
optimizer.zero_grad()
stats = method.step(batch)
optimizer.step()
(loss, sim) = (stats if isinstance(stats, tuple) else (stats, 0))
print('Epoch {:03d}, batch {:03d}, train_loss {:.4f}, sim {:.4f}, rm train_loss {:.3f}, rm sim {:.3f}'.format(e, b, loss, sim, rm1(loss), rm2(sim)))
tock = time.time()
elapsed_time += (tock - tick)
if settings['use_scheduler']:
val_results[f'start_{j}'][f'epoch_{e}'] = {'lr': scheduler.get_last_lr()[0]}
scheduler.step()
if ((settings['train_eval_every'] > 0) and (((e + 1) % settings['train_eval_every']) == 0)):
train_results = evaluate(j, e, method, scores, train_loader, logdir, reference_point=settings['reference_point'], split='train', result_dict=train_results)
if ((settings['eval_every'] > 0) and (((e + 1) % settings['eval_every']) == 0)):
val_results = evaluate(j, e, method, scores, val_loader, logdir, reference_point=settings['reference_point'], split='val', result_dict=val_results)
test_results = evaluate(j, e, method, scores, test_loader, logdir, reference_point=settings['reference_point'], split='test', result_dict=test_results)
if ((settings['checkpoint_every'] > 0) and (((e + 1) % settings['checkpoint_every']) == 0)):
pathlib.Path(os.path.join(logdir, 'checkpoints')).mkdir(parents=True, exist_ok=True)
torch.save(method.model.state_dict(), os.path.join(logdir, 'checkpoints', 'c_{}-{:03d}.pth'.format(j, e)))
print('epoch_max={}, val_volume_max={}'.format(epoch_max, volume_max))
pathlib.Path(os.path.join(logdir, 'checkpoints')).mkdir(parents=True, exist_ok=True)
torch.save(method.model.state_dict(), os.path.join(logdir, 'checkpoints', 'c_{}-{:03d}.pth'.format(j, 999999)))
return volume_max
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', '-d', default='mm', help='The dataset to run on.')
parser.add_argument('--method', '-m', default='cosmos', help='The method to generate the Pareto front.')
parser.add_argument('--seed', '-s', default=1, type=int, help='Seed')
parser.add_argument('--task_id', '-t', default=None, type=int, help='Task id to run single task in parallel. If not set then sequentially.')
args = parser.parse_args()
settings = s.generic
if (args.method == 'single_task'):
settings.update(s.SingleTaskSolver)
if (args.task_id is not None):
settings['num_starts'] = 1
settings['task_id'] = args.task_id
elif (args.method == 'cosmos'):
settings.update(s.cosmos)
elif (args.method == 'hyper_ln'):
settings.update(s.hyperSolver_ln)
elif (args.method == 'hyper_epo'):
settings.update(s.hyperSolver_epo)
elif (args.method == 'pmtl'):
settings.update(s.paretoMTL)
elif (args.method == 'mgda'):
settings.update(s.mgda)
elif (args.method == 'uniform'):
settings.update(s.uniform_scaling)
if (args.dataset == 'mm'):
settings.update(s.multi_mnist)
elif (args.dataset == 'adult'):
settings.update(s.adult)
elif (args.dataset == 'mfm'):
settings.update(s.multi_fashion_mnist)
elif (args.dataset == 'fm'):
settings.update(s.multi_fashion)
elif (args.dataset == 'credit'):
settings.update(s.credit)
elif (args.dataset == 'compass'):
settings.update(s.compass)
elif (args.dataset == 'celeba'):
settings.update(s.celeba)
settings['seed'] = args.seed
return settings
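
# A minimal sketch of the intended entry point: parse the settings and run
# the training loop defined above (main returns the best validation
# hypervolume).
if __name__ == '__main__':
    volume = main(parse_args())
    print('best validation hypervolume:', volume)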
|
class BaseMethod():
def model_params(self):
return list(self.model.parameters())
def new_epoch(self, e):
self.model.train()
@abstractmethod
def step(self, batch):
raise NotImplementedError()
def log(self):
return {}
@abstractmethod
def eval_step(self, batch):
raise NotImplementedError()
|
class Upsampler(nn.Module):
def __init__(self, K, child_model, input_dim):
'\n In case of tabular data: append the sampled rays to the data instances (no upsampling)\n In case of image data: use a transposed CNN for the sampled rays.\n '
super().__init__()
if (len(input_dim) == 1):
self.tabular = True
elif (len(input_dim) == 3):
self.tabular = False
self.transposed_cnn = nn.Sequential(nn.ConvTranspose2d(K, K, kernel_size=4, stride=1, padding=0, bias=False), nn.ReLU(inplace=True), nn.ConvTranspose2d(K, K, kernel_size=6, stride=2, padding=1, bias=False), nn.ReLU(inplace=True), nn.Upsample(input_dim[(- 2):]))
else:
            raise ValueError(f'Unknown dataset structure, expected 1 or 3 dimensions, got {input_dim}')
self.child_model = child_model
def forward(self, batch):
x = batch['data']
b = x.shape[0]
a = batch['alpha'].repeat(b, 1)
if (not self.tabular):
a = a.reshape(b, len(batch['alpha']), 1, 1)
a = self.transposed_cnn(a)
x = torch.cat((x, a), dim=1)
return self.child_model(dict(data=x))
def private_params(self):
if hasattr(self.child_model, 'private_params'):
return self.child_model.private_params()
else:
return []
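
# A minimal sketch (the child model is a hypothetical stand-in) of the
# tabular path: the K-dimensional preference ray is appended to every
# instance, so the child model sees input_dim + K features.
import torch
import torch.nn as nn

class _DummyChild(nn.Module):
    def __init__(self, in_dim):
        super().__init__()
        self.fc = nn.Linear(in_dim, 1)
    def forward(self, batch):
        return {'logits': self.fc(batch['data'])}

upsampler = Upsampler(K=2, child_model=_DummyChild(10 + 2), input_dim=[10])
out = upsampler({'data': torch.randn(4, 10), 'alpha': torch.rand(2)})
print(out['logits'].shape)  # torch.Size([4, 1])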
|
class COSMOSMethod(BaseMethod):
def __init__(self, objectives, alpha, lamda, dim, n_test_rays, **kwargs):
        '\n Instantiate the cosmos solver.\n\n Args:\n objectives: A list of objectives\n alpha: Dirichlet sampling parameter (list or float)\n lamda: Cosine similarity penalty\n dim: Dimensions of the data\n n_test_rays: The number of test rays used for evaluation.\n '
self.objectives = objectives
self.K = len(objectives)
self.alpha = alpha
self.n_test_rays = n_test_rays
self.lamda = lamda
dim = list(dim)
dim[0] = (dim[0] + self.K)
model = model_from_dataset(method='cosmos', dim=dim, **kwargs)
self.model = Upsampler(self.K, model, dim).cuda()
self.n_params = num_parameters(self.model)
print('Number of parameters: {}'.format(self.n_params))
def step(self, batch):
if isinstance(self.alpha, list):
batch['alpha'] = torch.from_numpy(np.random.dirichlet(self.alpha, 1).astype(np.float32).flatten()).cuda()
elif (self.alpha > 0):
batch['alpha'] = torch.from_numpy(np.random.dirichlet([self.alpha for _ in range(self.K)], 1).astype(np.float32).flatten()).cuda()
else:
raise ValueError(f'Unknown value for alpha: {self.alpha}, expecting list or float.')
self.model.zero_grad()
logits = self.model(batch)
batch.update(logits)
loss_total = None
task_losses = []
for (a, objective) in zip(batch['alpha'], self.objectives):
task_loss = objective(**batch)
            loss_total = ((a * task_loss) if (loss_total is None) else (loss_total + (a * task_loss)))
task_losses.append(task_loss)
cossim = torch.nn.functional.cosine_similarity(torch.stack(task_losses), batch['alpha'], dim=0)
loss_total -= (self.lamda * cossim)
loss_total.backward()
return (loss_total.item(), cossim.item())
def eval_step(self, batch, test_rays=None):
self.model.eval()
logits = []
with torch.no_grad():
if (test_rays is None):
test_rays = circle_points(self.n_test_rays, dim=self.K)
for ray in test_rays:
ray = torch.from_numpy(ray.astype(np.float32)).cuda()
ray /= ray.sum()
batch['alpha'] = ray
logits.append(self.model(batch))
return logits
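
# A small numeric sketch of the COSMOS training objective for two tasks
# (values are made up): the sampled ray weighs the task losses, and the
# cosine similarity between the loss vector and the ray is subtracted,
# scaled by lamda.
import torch

_alpha = torch.tensor([0.7, 0.3])   # sampled preference ray
_losses = torch.tensor([0.4, 0.9])  # per-task losses
_lamda = 2.0
_cossim = torch.nn.functional.cosine_similarity(_losses, _alpha, dim=0)
print((_alpha * _losses).sum() - _lamda * _cossim)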
|
class MGDAMethod(BaseMethod):
def __init__(self, objectives, approximate_norm_solution, normalization_type, **kwargs) -> None:
super().__init__()
self.objectives = objectives
self.approximate_norm_solution = approximate_norm_solution
self.normalization_type = normalization_type
        self.model = model_from_dataset(method='mgda', **kwargs).cuda()
def new_epoch(self, e):
self.model.train()
def step(self, batch):
if self.approximate_norm_solution:
self.model.zero_grad()
with torch.no_grad():
rep = self.model.forward_feature_extraction(batch)
gradients = []
obj_values = []
for (i, objective) in enumerate(self.objectives):
self.model.zero_grad()
logits = self.model.forward_linear(rep, i)
batch.update(logits)
output = objective(**batch)
output.backward()
obj_values.append(output.item())
gradients.append({})
private_params = (self.model.private_params() if hasattr(self.model, 'private_params') else [])
for (name, param) in self.model.named_parameters():
not_private = all([(p not in name) for p in private_params])
if (not_private and param.requires_grad and (param.grad is not None)):
gradients[i][name] = param.grad.data.detach().clone()
param.grad = None
self.model.zero_grad()
grads = gradients
else:
(grads, obj_values) = calc_gradients(batch, self.model, self.objectives)
gn = gradient_normalizers(grads, obj_values, self.normalization_type)
for t in range(len(self.objectives)):
for gr_i in grads[t]:
grads[t][gr_i] = (grads[t][gr_i] / gn[t])
grads = [[v for v in d.values()] for d in grads]
(sol, min_norm) = MinNormSolver.find_min_norm_element(grads)
self.model.zero_grad()
logits = self.model(batch)
batch.update(logits)
loss_total = None
for (a, objective) in zip(sol, self.objectives):
task_loss = objective(**batch)
            loss_total = ((a * task_loss) if (loss_total is None) else (loss_total + (a * task_loss)))
loss_total.backward()
return (loss_total.item(), 0)
def eval_step(self, batch):
self.model.eval()
return [self.model(batch)]
|
class PHNHyper(nn.Module):
'Hypernetwork\n '
def __init__(self, kernel_size: List[int], ray_hidden_dim=100, out_dim=10, target_hidden_dim=50, n_kernels=10, n_conv_layers=2, n_hidden=1, n_tasks=2):
super().__init__()
self.n_conv_layers = n_conv_layers
self.n_hidden = n_hidden
self.n_tasks = n_tasks
        assert (len(kernel_size) == n_conv_layers), 'kernel_size must be a list with one entry (the kernel size) per conv layer'
self.ray_mlp = nn.Sequential(nn.Linear(2, ray_hidden_dim), nn.ReLU(inplace=True), nn.Linear(ray_hidden_dim, ray_hidden_dim), nn.ReLU(inplace=True), nn.Linear(ray_hidden_dim, ray_hidden_dim))
self.conv_0_weights = nn.Linear(ray_hidden_dim, ((n_kernels * kernel_size[0]) * kernel_size[0]))
self.conv_0_bias = nn.Linear(ray_hidden_dim, n_kernels)
for i in range(1, n_conv_layers):
p = ((2 ** (i - 1)) * n_kernels)
c = ((2 ** i) * n_kernels)
setattr(self, f'conv_{i}_weights', nn.Linear(ray_hidden_dim, (((c * p) * kernel_size[i]) * kernel_size[i])))
setattr(self, f'conv_{i}_bias', nn.Linear(ray_hidden_dim, c))
latent = 25
        self.hidden_0_weights = nn.Linear(ray_hidden_dim, (((target_hidden_dim * (2 ** i)) * n_kernels) * latent))  # i is the index of the last conv layer from the loop above
self.hidden_0_bias = nn.Linear(ray_hidden_dim, target_hidden_dim)
for j in range(n_tasks):
setattr(self, f'task_{j}_weights', nn.Linear(ray_hidden_dim, (target_hidden_dim * out_dim)))
setattr(self, f'task_{j}_bias', nn.Linear(ray_hidden_dim, out_dim))
def shared_parameters(self):
return list([p for (n, p) in self.named_parameters() if ('task' not in n)])
def forward(self, ray):
features = self.ray_mlp(ray)
out_dict = {}
layer_types = ['conv', 'hidden', 'task']
for i in layer_types:
if (i == 'conv'):
n_layers = self.n_conv_layers
elif (i == 'hidden'):
n_layers = self.n_hidden
elif (i == 'task'):
n_layers = self.n_tasks
for j in range(n_layers):
out_dict[f'{i}{j}.weights'] = getattr(self, f'{i}_{j}_weights')(features)
out_dict[f'{i}{j}.bias'] = getattr(self, f'{i}_{j}_bias')(features).flatten()
return out_dict
|
class PHNTarget(nn.Module):
'Target network\n '
def __init__(self, kernel_size, n_kernels=10, out_dim=10, target_hidden_dim=50, n_conv_layers=2, n_tasks=2):
super().__init__()
        assert (len(kernel_size) == n_conv_layers), 'kernel_size must be a list with one entry (the kernel size) per conv layer'
self.n_kernels = n_kernels
self.kernel_size = kernel_size
self.out_dim = out_dim
self.n_conv_layers = n_conv_layers
self.n_tasks = n_tasks
self.target_hidden_dim = target_hidden_dim
def forward(self, x, weights=None):
x = F.conv2d(x, weight=weights['conv0.weights'].reshape(self.n_kernels, 1, self.kernel_size[0], self.kernel_size[0]), bias=weights['conv0.bias'], stride=1)
x = F.relu(x)
x = F.max_pool2d(x, 2)
for i in range(1, self.n_conv_layers):
x = F.conv2d(x, weight=weights[f'conv{i}.weights'].reshape(int(((2 ** i) * self.n_kernels)), int(((2 ** (i - 1)) * self.n_kernels)), self.kernel_size[i], self.kernel_size[i]), bias=weights[f'conv{i}.bias'], stride=1)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
x = F.linear(x, weight=weights['hidden0.weights'].reshape(self.target_hidden_dim, x.shape[(- 1)]), bias=weights['hidden0.bias'])
logits = []
for j in range(self.n_tasks):
logits.append(F.linear(x, weight=weights[f'task{j}.weights'].reshape(self.out_dim, self.target_hidden_dim), bias=weights[f'task{j}.bias']))
return logits
|
class LeNetPHNHyper(PHNHyper):
pass
|
class LeNetPHNTargetWrapper(PHNTarget):
def forward(self, x, weights=None):
logits = super().forward(x, weights)
return dict(logits_l=logits[0], logits_r=logits[1])
|
class FCPHNHyper(nn.Module):
def __init__(self, dim, ray_hidden_dim=100, n_tasks=2):
super().__init__()
self.feature_dim = dim[0]
self.ray_mlp = nn.Sequential(nn.Linear(n_tasks, ray_hidden_dim), nn.ReLU(inplace=True), nn.Linear(ray_hidden_dim, ray_hidden_dim), nn.ReLU(inplace=True), nn.Linear(ray_hidden_dim, ray_hidden_dim))
self.fc_0_weights = nn.Linear(ray_hidden_dim, (60 * self.feature_dim))
self.fc_0_bias = nn.Linear(ray_hidden_dim, 60)
self.fc_1_weights = nn.Linear(ray_hidden_dim, (25 * 60))
self.fc_1_bias = nn.Linear(ray_hidden_dim, 25)
self.fc_2_weights = nn.Linear(ray_hidden_dim, (1 * 25))
self.fc_2_bias = nn.Linear(ray_hidden_dim, 1)
def forward(self, ray):
x = self.ray_mlp(ray)
out_dict = {'fc0.weights': self.fc_0_weights(x).reshape(60, self.feature_dim), 'fc0.bias': self.fc_0_bias(x), 'fc1.weights': self.fc_1_weights(x).reshape(25, 60), 'fc1.bias': self.fc_1_bias(x), 'fc2.weights': self.fc_2_weights(x).reshape(1, 25), 'fc2.bias': self.fc_2_bias(x)}
return out_dict
|
class FCPHNTarget(nn.Module):
def forward(self, x, weights):
x = F.linear(x, weight=weights['fc0.weights'], bias=weights['fc0.bias'])
x = F.relu(x)
x = F.linear(x, weight=weights['fc1.weights'], bias=weights['fc1.bias'])
x = F.relu(x)
x = F.linear(x, weight=weights['fc2.weights'], bias=weights['fc2.bias'])
return {'logits': x}
|
class HypernetMethod(BaseMethod):
def __init__(self, objectives, dim, n_test_rays, alpha, internal_solver, **kwargs):
self.objectives = objectives
self.n_test_rays = n_test_rays
self.alpha = alpha
self.K = len(objectives)
if (len(dim) == 1):
hnet = FCPHNHyper(dim, ray_hidden_dim=100)
net = FCPHNTarget()
elif (len(dim) == 3):
hnet: nn.Module = LeNetPHNHyper([9, 5], ray_hidden_dim=100)
net: nn.Module = LeNetPHNTargetWrapper([9, 5])
else:
            raise ValueError(f'Unknown dim {dim}, expected len 1 or len 3')
print('Number of parameters: {}'.format(num_parameters(hnet)))
self.model = hnet.cuda()
self.net = net.cuda()
if (internal_solver == 'linear'):
self.solver = LinearScalarizationSolver(n_tasks=len(objectives))
elif (internal_solver == 'epo'):
self.solver = EPOSolver(n_tasks=len(objectives), n_params=num_parameters(hnet))
def step(self, batch):
if (self.alpha > 0):
ray = torch.from_numpy(np.random.dirichlet([self.alpha for _ in range(len(self.objectives))], 1).astype(np.float32).flatten()).cuda()
else:
alpha = torch.empty(1).uniform_(0.0, 1.0)
ray = torch.tensor([alpha.item(), (1 - alpha.item())]).cuda()
img = batch['data']
weights = self.model(ray)
batch.update(self.net(img, weights))
losses = torch.stack([o(**batch) for o in self.objectives])
ray = ray.squeeze(0)
loss = self.solver(losses, ray, list(self.model.parameters()))
loss.backward()
return loss.item()
def eval_step(self, batch):
self.model.eval()
test_rays = circle_points(self.n_test_rays, dim=self.K)
logits = []
for ray in test_rays:
ray = torch.from_numpy(ray.astype(np.float32)).cuda()
ray /= ray.sum()
weights = self.model(ray)
logits.append(self.net(batch['data'], weights))
return logits
|
class Solver():
def __init__(self, n_tasks):
super().__init__()
self.n_tasks = n_tasks
@abstractmethod
def get_weighted_loss(self, losses, ray, parameters=None, **kwargs):
pass
def __call__(self, losses, ray, parameters, **kwargs):
return self.get_weighted_loss(losses, ray, parameters, **kwargs)
|
class LinearScalarizationSolver(Solver):
'For LS we use the preference ray to weigh the losses\n '
def __init__(self, n_tasks):
super().__init__(n_tasks)
def get_weighted_loss(self, losses, ray, parameters=None, **kwargs):
return (losses * ray).sum()
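
# For example, losses (0.2, 0.8) with ray (0.75, 0.25) give
# 0.75 * 0.2 + 0.25 * 0.8 = 0.35.
import torch

ls = LinearScalarizationSolver(n_tasks=2)
print(ls(torch.tensor([0.2, 0.8]), torch.tensor([0.75, 0.25]), parameters=None))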
|
class EPOSolver(Solver):
'Wrapper over EPO\n '
def __init__(self, n_tasks, n_params):
super().__init__(n_tasks)
self.solver = EPO(n_tasks=n_tasks, n_params=n_params)
def get_weighted_loss(self, losses, ray, parameters=None, **kwargs):
assert (parameters is not None)
return self.solver.get_weighted_loss(losses, ray, parameters)
|
class EPO():
def __init__(self, n_tasks, n_params):
self.n_tasks = n_tasks
self.n_params = n_params
def __call__(self, losses, ray, parameters):
return self.get_weighted_loss(losses, ray, parameters)
@staticmethod
def _flattening(grad):
        return torch.cat(tuple(g.reshape(-1) for g in grad), dim=0)
def get_weighted_loss(self, losses, ray, parameters):
lp = ExactParetoLP(m=self.n_tasks, n=self.n_params, r=ray.cpu().numpy())
grads = []
for (i, loss) in enumerate(losses):
g = torch.autograd.grad(loss, parameters, retain_graph=True)
flat_grad = self._flattening(g)
grads.append(flat_grad.data)
G = torch.stack(grads)
GG_T = (G @ G.T)
GG_T = GG_T.detach().cpu().numpy()
numpy_losses = losses.detach().cpu().numpy()
try:
alpha = lp.get_alpha(numpy_losses, G=GG_T, C=True)
except Exception as excep:
print(excep)
alpha = None
if (alpha is None):
alpha = (ray / ray.sum()).cpu().numpy()
alpha *= self.n_tasks
alpha = torch.from_numpy(alpha).to(losses.device)
weighted_loss = torch.sum((losses * alpha))
return weighted_loss
|
class ExactParetoLP(object):
'modifications of the code in https://github.com/dbmptr/EPOSearch\n '
def __init__(self, m, n, r, eps=0.0001):
cvxopt.glpk.options['msg_lev'] = 'GLP_MSG_OFF'
self.m = m
self.n = n
self.r = r
self.eps = eps
self.last_move = None
self.a = cp.Parameter(m)
self.C = cp.Parameter((m, m))
self.Ca = cp.Parameter(m)
self.rhs = cp.Parameter(m)
self.alpha = cp.Variable(m)
obj_bal = cp.Maximize((self.alpha @ self.Ca))
constraints_bal = [(self.alpha >= 0), (cp.sum(self.alpha) == 1), ((self.C @ self.alpha) >= self.rhs)]
self.prob_bal = cp.Problem(obj_bal, constraints_bal)
obj_dom = cp.Maximize(cp.sum((self.alpha @ self.C)))
constraints_res = [(self.alpha >= 0), (cp.sum(self.alpha) == 1), ((self.alpha @ self.Ca) >= (- cp.neg(cp.max(self.Ca)))), ((self.C @ self.alpha) >= 0)]
constraints_rel = [(self.alpha >= 0), (cp.sum(self.alpha) == 1), ((self.C @ self.alpha) >= 0)]
self.prob_dom = cp.Problem(obj_dom, constraints_res)
self.prob_rel = cp.Problem(obj_dom, constraints_rel)
self.gamma = 0
self.mu_rl = 0
def get_alpha(self, l, G, r=None, C=False, relax=False):
r = (self.r if (r is None) else r)
assert (len(l) == len(G) == len(r) == self.m), 'length != m'
(rl, self.mu_rl, self.a.value) = adjustments(l, r)
self.C.value = (G if C else (G @ G.T))
self.Ca.value = (self.C.value @ self.a.value)
if (self.mu_rl > self.eps):
J = (self.Ca.value > 0)
if (len(np.where(J)[0]) > 0):
J_star_idx = np.where((rl == np.max(rl)))[0]
self.rhs.value = self.Ca.value.copy()
self.rhs.value[J] = (- np.inf)
self.rhs.value[J_star_idx] = 0
else:
self.rhs.value = np.zeros_like(self.Ca.value)
self.gamma = self.prob_bal.solve(solver=cp.GLPK, verbose=False)
self.last_move = 'bal'
else:
if relax:
self.gamma = self.prob_rel.solve(solver=cp.GLPK, verbose=False)
else:
self.gamma = self.prob_dom.solve(solver=cp.GLPK, verbose=False)
self.last_move = 'dom'
return self.alpha.value
|
def mu(rl, normed=False):
    if np.any(rl < 0):
        raise ValueError(f'rl < 0: rl = {rl}')
m = len(rl)
l_hat = (rl if normed else (rl / rl.sum()))
eps = np.finfo(rl.dtype).eps
l_hat = l_hat[(l_hat > eps)]
return np.sum((l_hat * np.log((l_hat * m))))
|
def adjustments(l, r=1):
m = len(l)
rl = (r * l)
l_hat = (rl / rl.sum())
mu_rl = mu(l_hat, normed=True)
a = (r * (np.log((l_hat * m)) - mu_rl))
return (rl, mu_rl, a)
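
# A small numeric sketch: mu is the KL divergence of the normalized
# preference-weighted losses r * l from the uniform distribution, i.e. how
# strongly the current losses violate the ray. When r * l is proportional
# to uniform, mu is 0 and the adjustments a vanish.
import numpy as np

rl, mu_rl, a = adjustments(np.array([0.5, 0.25]), np.array([1.0, 2.0]))
print(mu_rl, a)  # 0.0 [0. 0.]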
|
def get_d_paretomtl_init(grads, losses, preference_vectors, pref_idx):
    ' \n Calculate the gradient direction for ParetoMTL initialization.\n\n Args:\n grads: flattened gradients for each task\n losses: values of the losses for each task\n preference_vectors: all preference vectors u\n pref_idx: which index of u we are currently using\n \n Returns:\n flag: whether a feasible initial solution was found\n weight: the weight vector for combining the task losses\n '
flag = False
nobj = losses.shape
current_pref = preference_vectors[pref_idx]
w = (preference_vectors - current_pref)
gx = torch.matmul(w, (losses / torch.norm(losses)))
idx = (gx > 0)
active_constraints = w[idx]
if (torch.sum(idx) <= 0):
flag = True
return (flag, torch.zeros(nobj))
if (torch.sum(idx) == 1):
sol = torch.ones(1).cuda().float()
else:
gx_gradient = torch.matmul(active_constraints, grads)
(sol, nd) = MinNormSolver.find_min_norm_element([[gx_gradient[t]] for t in range(len(gx_gradient))])
sol = torch.Tensor(sol).cuda()
weight = torch.matmul(sol, active_constraints)
return (flag, weight)
|
def get_d_paretomtl(grads, losses, preference_vectors, pref_idx):
'\n calculate the gradient direction for ParetoMTL \n \n Args:\n grads: flattened gradients for each task\n losses: values of the losses for each task\n preference_vectors: all preference vectors u\n pref_idx: which index of u we are currently using\n '
current_weight = preference_vectors[pref_idx]
rest_weights = preference_vectors
w = (rest_weights - current_weight)
gx = torch.matmul(w, (losses / torch.norm(losses)))
idx = (gx > 0)
if (torch.sum(idx) <= 0):
(sol, nd) = MinNormSolver.find_min_norm_element_FW([[grads[t]] for t in range(len(grads))])
return torch.tensor(sol).cuda().float()
else:
vec = torch.cat((grads, torch.matmul(w[idx], grads)))
(sol, nd) = MinNormSolver.find_min_norm_element([[vec[t]] for t in range(len(vec))])
sol = torch.Tensor(sol).cuda()
n = preference_vectors.shape[1]
weights = []
for i in range(n):
weight_i = (sol[i] + torch.sum(torch.stack([(sol[j] * w[idx][((j - n), i)]) for j in torch.arange(n, (n + torch.sum(idx)))])))
weights.append(weight_i)
weight = torch.stack(weights)
return weight
|
class ParetoMTLMethod(BaseMethod):
def __init__(self, objectives, num_starts, **kwargs):
assert (len(objectives) <= 2)
self.objectives = objectives
self.num_pareto_points = num_starts
self.init_solution_found = False
self.model = model_from_dataset(method='paretoMTL', **kwargs).cuda()
self.pref_idx = (- 1)
self.ref_vec = torch.Tensor(circle_points(self.num_pareto_points)).cuda().float()
def new_epoch(self, e):
if (e == 0):
self.pref_idx += 1
reset_weights(self.model)
self.init_solution_found = False
self.e = e
self.model.train()
def log(self):
return {'train_ray': self.ref_vec[self.pref_idx].cpu().numpy().tolist()}
def _find_initial_solution(self, batch):
grads = {}
losses_vec = []
for i in range(len(self.objectives)):
self.model.zero_grad()
batch.update(self.model(batch))
task_loss = self.objectives[i](**batch)
losses_vec.append(task_loss.data)
task_loss.backward()
grads[i] = []
private_params = (self.model.private_params() if hasattr(self.model, 'private_params') else [])
for (name, param) in self.model.named_parameters():
if ((name not in private_params) and (param.grad is not None)):
grads[i].append(Variable(param.grad.data.clone().flatten(), requires_grad=False))
grads_list = [torch.cat([g for g in grads[i]]) for i in range(len(grads))]
grads = torch.stack(grads_list)
losses_vec = torch.stack(losses_vec)
(self.init_solution_found, weight_vec) = get_d_paretomtl_init(grads, losses_vec, self.ref_vec, self.pref_idx)
if self.init_solution_found:
print('Initial solution found')
self.model.zero_grad()
for i in range(len(self.objectives)):
batch.update(self.model(batch))
task_loss = self.objectives[i](**batch)
if (i == 0):
loss_total = (weight_vec[i] * task_loss)
else:
loss_total = (loss_total + (weight_vec[i] * task_loss))
loss_total.backward()
return loss_total.item()
def step(self, batch):
if ((self.e < 2) and (not self.init_solution_found)):
return self._find_initial_solution(batch)
else:
(gradients, obj_values) = calc_gradients(batch, self.model, self.objectives)
grads = [torch.cat([torch.flatten(v) for (k, v) in sorted(grads.items())]) for grads in gradients]
grads = torch.stack(grads)
losses_vec = torch.Tensor(obj_values).cuda()
weight_vec = get_d_paretomtl(grads, losses_vec, self.ref_vec, self.pref_idx)
normalize_coeff = (len(self.objectives) / torch.sum(torch.abs(weight_vec)))
weight_vec = (weight_vec * normalize_coeff)
loss_total = None
for (a, objective) in zip(weight_vec, self.objectives):
logits = self.model(batch)
batch.update(logits)
task_loss = objective(**batch)
                loss_total = ((a * task_loss) if (loss_total is None) else (loss_total + (a * task_loss)))
loss_total.backward()
return loss_total.item()
def eval_step(self, batch):
self.model.eval()
return [self.model(batch)]
|
class SingleTaskMethod(BaseMethod):
def __init__(self, objectives, num_starts, **kwargs):
self.objectives = objectives
if (num_starts > 1):
self.task = (- 1)
assert ('task_id' not in kwargs)
else:
assert (num_starts == 1)
print(objectives)
self.task = (kwargs['task_id'] - 1)
for obj in objectives:
if (kwargs['task_id'] == int(obj.label_name.replace('labels_', ''))):
self.objectives = {kwargs['task_id']: obj}
self.model = model_from_dataset(method='single_task', **kwargs).cuda()
def model_params(self):
return list(self.model.parameters())
def new_epoch(self, e):
self.model.train()
if (e == 0):
self.task += 1
def step(self, batch):
batch.update(self.model(batch))
loss = self.objectives[self.task](**batch)
loss.backward()
return loss.item()
def log(self):
return {'task': self.task}
def eval_step(self, batch, test_rays=None):
self.model.eval()
with torch.no_grad():
return [self.model(batch)]
|
class UniformScalingMethod(BaseMethod):
def __init__(self, objectives, **kwargs):
self.objectives = objectives
self.J = len(objectives)
self.model = model_from_dataset(method='uniform_scaling', **kwargs).cuda()
def model_params(self):
return list(self.model.parameters())
def new_epoch(self, e):
self.model.train()
def step(self, batch):
batch.update(self.model(batch))
loss = sum([((1 / self.J) * o(**batch)) for o in self.objectives])
loss.backward()
return loss.item()
def eval_step(self, batch, test_rays=None):
self.model.eval()
with torch.no_grad():
return [self.model(batch)]
|
class MBConvBlock(nn.Module):
'Mobile Inverted Residual Bottleneck Block.\n Args:\n block_args (namedtuple): BlockArgs, defined in utils.py.\n global_params (namedtuple): GlobalParam, defined in utils.py.\n image_size (tuple or list): [image_height, image_width].\n References:\n [1] https://arxiv.org/abs/1704.04861 (MobileNet v1)\n [2] https://arxiv.org/abs/1801.04381 (MobileNet v2)\n [3] https://arxiv.org/abs/1905.02244 (MobileNet v3)\n '
def __init__(self, block_args, global_params, image_size=None):
super().__init__()
self._block_args = block_args
self._bn_mom = (1 - global_params.batch_norm_momentum)
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = ((self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1))
self.id_skip = block_args.id_skip
inp = self._block_args.input_filters
oup = (self._block_args.input_filters * self._block_args.expand_ratio)
if (self._block_args.expand_ratio != 1):
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
k = self._block_args.kernel_size
s = self._block_args.stride
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._depthwise_conv = Conv2d(in_channels=oup, out_channels=oup, groups=oup, kernel_size=k, stride=s, bias=False)
image_size = calculate_output_image_size(image_size, s)
if self.has_se:
Conv2d = get_same_padding_conv2d(image_size=(1, 1))
num_squeezed_channels = max(1, int((self._block_args.input_filters * self._block_args.se_ratio)))
self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
final_oup = self._block_args.output_filters
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._swish = MemoryEfficientSwish()
def forward(self, inputs, drop_connect_rate=None):
"MBConvBlock's forward function.\n Args:\n inputs (tensor): Input tensor.\n drop_connect_rate (bool): Drop connect rate (float, between 0 and 1).\n Returns:\n Output of this block after processing.\n "
x = inputs
if (self._block_args.expand_ratio != 1):
x = self._expand_conv(inputs)
x = self._swish(x)
x = self._depthwise_conv(x)
x = self._swish(x)
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_reduce(x_squeezed)
x_squeezed = self._swish(x_squeezed)
x_squeezed = self._se_expand(x_squeezed)
x = (torch.sigmoid(x_squeezed) * x)
x = self._project_conv(x)
(input_filters, output_filters) = (self._block_args.input_filters, self._block_args.output_filters)
if (self.id_skip and (self._block_args.stride == 1) and (input_filters == output_filters)):
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = (x + inputs)
return x
def set_swish(self, memory_efficient=True):
'Sets swish function as memory efficient (for training) or standard (for export).\n Args:\n memory_efficient (bool): Whether to use memory-efficient version of swish.\n '
self._swish = (MemoryEfficientSwish() if memory_efficient else Swish())
|
class EfficientNet(nn.Module):
"EfficientNet model.\n Most easily loaded with the .from_name or .from_pretrained methods.\n Args:\n blocks_args (list[namedtuple]): A list of BlockArgs to construct blocks.\n global_params (namedtuple): A set of GlobalParams shared between blocks.\n References:\n [1] https://arxiv.org/abs/1905.11946 (EfficientNet)\n Example:\n \n \n import torch\n >>> from efficientnet.model import EfficientNet\n >>> inputs = torch.rand(1, 3, 224, 224)\n >>> model = EfficientNet.from_pretrained('efficientnet-b0')\n >>> model.eval()\n >>> outputs = model(inputs)\n "
def __init__(self, blocks_args=None, global_params=None):
super().__init__()
assert isinstance(blocks_args, list), 'blocks_args should be a list'
assert (len(blocks_args) > 0), 'block args must be greater than 0'
self._global_params = global_params
self._blocks_args = blocks_args
bn_mom = (1 - self._global_params.batch_norm_momentum)
bn_eps = self._global_params.batch_norm_epsilon
image_size = global_params.image_size
Conv2d = get_same_padding_conv2d(image_size=image_size)
out_channels = round_filters(32, self._global_params)
self.stem_out_channels = out_channels
        self._conv_stem = Conv2d(self.my_in_channels, out_channels, kernel_size=3, stride=2, bias=False)  # my_in_channels is not defined in this class; it must be set externally (the stock EfficientNet uses 3 input channels here)
image_size = calculate_output_image_size(image_size, 2)
self._blocks = nn.ModuleList([])
for block_args in self._blocks_args:
block_args = block_args._replace(input_filters=round_filters(block_args.input_filters, self._global_params), output_filters=round_filters(block_args.output_filters, self._global_params), num_repeat=round_repeats(block_args.num_repeat, self._global_params))
self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size))
image_size = calculate_output_image_size(image_size, block_args.stride)
if (block_args.num_repeat > 1):
block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
for _ in range((block_args.num_repeat - 1)):
self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size))
in_channels = block_args.output_filters
out_channels = round_filters(1280, self._global_params)
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self._avg_pooling = nn.AdaptiveAvgPool2d(1)
self._dropout = nn.Dropout(self._global_params.dropout_rate)
self._fc = nn.Linear(out_channels, self._global_params.num_classes)
self._swish = MemoryEfficientSwish()
def set_swish(self, memory_efficient=True):
'Sets swish function as memory efficient (for training) or standard (for export).\n Args:\n memory_efficient (bool): Whether to use memory-efficient version of swish.\n '
self._swish = (MemoryEfficientSwish() if memory_efficient else Swish())
for block in self._blocks:
block.set_swish(memory_efficient)
def extract_endpoints(self, inputs):
"Use convolution layer to extract features\n from reduction levels i in [1, 2, 3, 4, 5].\n Args:\n inputs (tensor): Input tensor.\n Returns:\n Dictionary of last intermediate features\n with reduction levels i in [1, 2, 3, 4, 5].\n Example:\n >>> import torch\n >>> from efficientnet.model import EfficientNet\n >>> inputs = torch.rand(1, 3, 224, 224)\n >>> model = EfficientNet.from_pretrained('efficientnet-b0')\n >>> endpoints = model.extract_endpoints(inputs)\n >>> print(endpoints['reduction_1'].shape) # torch.Size([1, 16, 112, 112])\n >>> print(endpoints['reduction_2'].shape) # torch.Size([1, 24, 56, 56])\n >>> print(endpoints['reduction_3'].shape) # torch.Size([1, 40, 28, 28])\n >>> print(endpoints['reduction_4'].shape) # torch.Size([1, 112, 14, 14])\n >>> print(endpoints['reduction_5'].shape) # torch.Size([1, 1280, 7, 7])\n "
endpoints = dict()
x = self._swish(self._conv_stem(inputs))
prev_x = x
for (idx, block) in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= (float(idx) / len(self._blocks))
x = block(x, drop_connect_rate=drop_connect_rate)
if (prev_x.size(2) > x.size(2)):
endpoints['reduction_{}'.format((len(endpoints) + 1))] = prev_x
prev_x = x
x = self._swish(self._conv_head(x))
endpoints['reduction_{}'.format((len(endpoints) + 1))] = x
return endpoints
def extract_features(self, inputs):
'use convolution layer to extract feature .\n Args:\n inputs (tensor): Input tensor.\n Returns:\n Output of the final convolution\n layer in the efficientnet model.\n '
x = self._swish(self._conv_stem(inputs))
for (idx, block) in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= (float(idx) / len(self._blocks))
x = block(x, drop_connect_rate=drop_connect_rate)
x = self._swish(self._conv_head(x))
return x
def forward(self, inputs):
"EfficientNet's forward function.\n Calls extract_features to extract features, applies final linear layer, and returns logits.\n Args:\n inputs (tensor): Input tensor.\n Returns:\n Output of this model after processing.\n "
x = self.extract_features(inputs)
x = self._avg_pooling(x)
if self._global_params.include_top:
x = x.flatten(start_dim=1)
x = self._dropout(x)
x = self._fc(x)
return x
@classmethod
def from_name(cls, model_name, in_channels=3, **override_params):
"create an efficientnet model according to name.\n Args:\n model_name (str): Name for efficientnet.\n in_channels (int): Input data's channel number.\n override_params (other key word params):\n Params to override model's global_params.\n Optional key:\n 'width_coefficient', 'depth_coefficient',\n 'image_size', 'dropout_rate',\n 'num_classes', 'batch_norm_momentum',\n 'batch_norm_epsilon', 'drop_connect_rate',\n 'depth_divisor', 'min_depth'\n Returns:\n An efficientnet model.\n "
cls._check_model_name_is_valid(model_name)
(blocks_args, global_params) = get_model_params(model_name, override_params)
model = cls(blocks_args, global_params)
model._change_in_channels(in_channels)
return model
@classmethod
def from_pretrained(cls, model_name, weights_path=None, advprop=False, in_channels=3, num_classes=1000, **override_params):
"create an efficientnet model according to name.\n Args:\n model_name (str): Name for efficientnet.\n weights_path (None or str):\n str: path to pretrained weights file on the local disk.\n None: use pretrained weights downloaded from the Internet.\n advprop (bool):\n Whether to load pretrained weights\n trained with advprop (valid when weights_path is None).\n in_channels (int): Input data's channel number.\n num_classes (int):\n Number of categories for classification.\n It controls the output size for final linear layer.\n override_params (other key word params):\n Params to override model's global_params.\n Optional key:\n 'width_coefficient', 'depth_coefficient',\n 'image_size', 'dropout_rate',\n 'batch_norm_momentum',\n 'batch_norm_epsilon', 'drop_connect_rate',\n 'depth_divisor', 'min_depth'\n Returns:\n A pretrained efficientnet model.\n "
model = cls.from_name(model_name=model_name, num_classes=num_classes, **override_params)
load_pretrained_weights(model, model_name, weights_path=weights_path, load_fc=(num_classes == 1000), advprop=advprop)
model._change_in_channels(in_channels)
return model
@classmethod
def get_image_size(cls, model_name):
'Get the input image size for a given efficientnet model.\n Args:\n model_name (str): Name for efficientnet.\n Returns:\n Input image size (resolution).\n '
cls._check_model_name_is_valid(model_name)
(_, _, res, _) = efficientnet_params(model_name)
return res
@classmethod
def _check_model_name_is_valid(cls, model_name):
'Validates model name.\n Args:\n model_name (str): Name for efficientnet.\n Returns:\n bool: Is a valid name or not.\n '
if (model_name not in VALID_MODELS):
raise ValueError(('model_name should be one of: ' + ', '.join(VALID_MODELS)))
def _change_in_channels(self, in_channels):
"Adjust model's first convolution layer to in_channels, if in_channels not equals 3.\n Args:\n in_channels (int): Input data's channel number.\n "
if (in_channels != 3):
Conv2d = get_same_padding_conv2d(image_size=self._global_params.image_size)
out_channels = round_filters(32, self._global_params)
self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
|
class Swish(nn.Module):
def forward(self, x):
return (x * torch.sigmoid(x))
|
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = (i * torch.sigmoid(i))
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_tensors[0]
sigmoid_i = torch.sigmoid(i)
return (grad_output * (sigmoid_i * (1 + (i * (1 - sigmoid_i)))))
|
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
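# Sanity-check sketch (added, not original): Swish and MemoryEfficientSwish
# should agree in both value and gradient; the custom autograd Function only
# saves the input tensor instead of intermediate activations, reducing memory.
import torch

x1 = torch.randn(8, requires_grad=True)
x2 = x1.detach().clone().requires_grad_(True)
Swish()(x1).sum().backward()
MemoryEfficientSwish()(x2).sum().backward()
assert torch.allclose(x1.grad, x2.grad, atol=1e-6)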
|
def round_filters(filters, global_params):
'Calculate and round the number of filters based on the width multiplier.\n Uses width_coefficient, depth_divisor and min_depth of global_params.\n Args:\n filters (int): Base number of filters.\n global_params (namedtuple): Global params of the model.\n Returns:\n new_filters: Scaled number of filters, rounded to a multiple of depth_divisor.\n '
multiplier = global_params.width_coefficient
if (not multiplier):
return filters
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
filters *= multiplier
min_depth = (min_depth or divisor)
new_filters = max(min_depth, ((int((filters + (divisor / 2))) // divisor) * divisor))
if (new_filters < (0.9 * filters)):
new_filters += divisor
return int(new_filters)
|
def round_repeats(repeats, global_params):
"Calculate module's repeat number of a block based on depth multiplier.\n Use depth_coefficient of global_params.\n Args:\n repeats (int): num_repeat to be calculated.\n global_params (namedtuple): Global params of the model.\n Returns:\n new repeat: New repeat number after calculating.\n "
multiplier = global_params.depth_coefficient
if (not multiplier):
return repeats
return int(math.ceil((multiplier * repeats)))
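# Worked example for the two scaling helpers above (a sketch; the real
# GlobalParams namedtuple carries more fields, a minimal stand-in is enough
# here). With width_coefficient=1.2 and depth_divisor=8: 32 * 1.2 = 38.4,
# rounded to the nearest multiple of 8 gives 40, and 40 >= 0.9 * 38.4, so no
# correction is applied.
import collections

FakeParams = collections.namedtuple('FakeParams', ['width_coefficient', 'depth_coefficient', 'depth_divisor', 'min_depth'])
gp = FakeParams(width_coefficient=1.2, depth_coefficient=1.4, depth_divisor=8, min_depth=None)
assert round_filters(32, gp) == 40  # 38.4 -> 40
assert round_repeats(3, gp) == 5    # ceil(1.4 * 3) = 5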
|
def drop_connect(inputs, p, training):
'Drop connect.\n Args:\n inputs (tensor: BCHW): Input of this structure.\n p (float: 0.0~1.0): Probability of dropping a sample.\n training (bool): The running mode.\n Returns:\n output: Output after drop connect.\n '
assert (0 <= p <= 1), 'p must be in range of [0,1]'
if (not training):
return inputs
batch_size = inputs.shape[0]
keep_prob = (1 - p)
# Build a per-sample Bernoulli(keep_prob) mask: keep_prob + U[0, 1) floors to 1 with probability keep_prob, and to 0 otherwise.
random_tensor = keep_prob
random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
binary_tensor = torch.floor(random_tensor)
# Rescale surviving samples by 1 / keep_prob so the expected activation is unchanged.
output = ((inputs / keep_prob) * binary_tensor)
return output
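# Quick empirical sketch of drop_connect's behaviour (added for illustration):
# in training mode whole samples are zeroed with probability p and survivors
# are rescaled by 1 / (1 - p), preserving the expectation; in eval mode the
# function is the identity.
import torch

x = torch.ones(10000, 1, 1, 1)
assert torch.equal(drop_connect(x, p=0.2, training=False), x)
print(drop_connect(x, p=0.2, training=True).mean().item())  # close to 1.0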
|
def get_width_and_height_from_size(x):
'Obtain height and width from x.\n Args:\n x (int, tuple or list): Data size.\n Returns:\n size: A tuple or list (H,W).\n '
if isinstance(x, int):
return (x, x)
if (isinstance(x, list) or isinstance(x, tuple)):
return x
else:
raise TypeError('x must be an int, a tuple or a list')
|
def calculate_output_image_size(input_image_size, stride):
"Calculates the output image size when using Conv2dSamePadding with a stride.\n Necessary for static padding. Thanks to mannatsingh for pointing this out.\n Args:\n input_image_size (int, tuple or list): Size of input image.\n stride (int, tuple or list): Conv2d operation's stride.\n Returns:\n output_image_size: A list [H,W].\n "
if (input_image_size is None):
return None
(image_height, image_width) = get_width_and_height_from_size(input_image_size)
stride = (stride if isinstance(stride, int) else stride[0])
image_height = int(math.ceil((image_height / stride)))
image_width = int(math.ceil((image_width / stride)))
return [image_height, image_width]
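# Example of the bookkeeping above (a sketch): with 'SAME' padding the output
# spatial size is ceil(input / stride), so 224 -> 112 for stride 2.
assert calculate_output_image_size(224, 2) == [112, 112]
assert calculate_output_image_size((224, 240), 2) == [112, 120]
assert calculate_output_image_size(None, 2) is None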
|
def get_same_padding_conv2d(image_size=None):
'Chooses static padding if you have specified an image size, and dynamic padding otherwise.\n Static padding is necessary for ONNX exporting of models.\n Args:\n image_size (int or tuple): Size of the image.\n Returns:\n Conv2dDynamicSamePadding or Conv2dStaticSamePadding.\n '
if (image_size is None):
return Conv2dDynamicSamePadding
else:
return partial(Conv2dStaticSamePadding, image_size=image_size)
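# The factory returns a class (or a partial binding image_size onto one), so
# either branch can be used exactly like nn.Conv2d; a minimal sketch:
Conv2d = get_same_padding_conv2d(image_size=None)  # Conv2dDynamicSamePadding
conv_dynamic = Conv2d(3, 16, kernel_size=3, stride=2, bias=False)
Conv2d = get_same_padding_conv2d(image_size=224)   # Conv2dStaticSamePadding bound to 224
conv_static = Conv2d(3, 16, kernel_size=3, stride=2, bias=False)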
|
class Conv2dDynamicSamePadding(nn.Conv2d):
'2D Convolutions like TensorFlow, for a dynamic image size.\n The padding amount is computed dynamically in the forward pass.\n '
def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
self.stride = (self.stride if (len(self.stride) == 2) else ([self.stride[0]] * 2))
def forward(self, x):
(ih, iw) = x.size()[(- 2):]
(kh, kw) = self.weight.size()[(- 2):]
(sh, sw) = self.stride
(oh, ow) = (math.ceil((ih / sh)), math.ceil((iw / sw)))
pad_h = max((((((oh - 1) * self.stride[0]) + ((kh - 1) * self.dilation[0])) + 1) - ih), 0)
pad_w = max((((((ow - 1) * self.stride[1]) + ((kw - 1) * self.dilation[1])) + 1) - iw), 0)
if ((pad_h > 0) or (pad_w > 0)):
x = F.pad(x, [(pad_w // 2), (pad_w - (pad_w // 2)), (pad_h // 2), (pad_h - (pad_h // 2))])
return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
|
class Conv2dStaticSamePadding(nn.Conv2d):
"2D Convolutions like TensorFlow's 'SAME' mode, with the given input image size.\n The padding mudule is calculated in construction function, then used in forward.\n "
def __init__(self, in_channels, out_channels, kernel_size, stride=1, image_size=None, **kwargs):
super().__init__(in_channels, out_channels, kernel_size, stride, **kwargs)
self.stride = (self.stride if (len(self.stride) == 2) else ([self.stride[0]] * 2))
assert (image_size is not None)
(ih, iw) = ((image_size, image_size) if isinstance(image_size, int) else image_size)
(kh, kw) = self.weight.size()[(- 2):]
(sh, sw) = self.stride
(oh, ow) = (math.ceil((ih / sh)), math.ceil((iw / sw)))
pad_h = max((((((oh - 1) * self.stride[0]) + ((kh - 1) * self.dilation[0])) + 1) - ih), 0)
pad_w = max((((((ow - 1) * self.stride[1]) + ((kw - 1) * self.dilation[1])) + 1) - iw), 0)
if ((pad_h > 0) or (pad_w > 0)):
self.static_padding = nn.ZeroPad2d(((pad_w // 2), (pad_w - (pad_w // 2)), (pad_h // 2), (pad_h - (pad_h // 2))))
else:
self.static_padding = nn.Identity()
def forward(self, x):
x = self.static_padding(x)
x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return x
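# Sanity sketch (added): both padding variants should reproduce TensorFlow's
# 'SAME' output size, ceil(input / stride), independent of the kernel size.
import math
import torch

x = torch.randn(1, 3, 224, 224)
for conv in (Conv2dDynamicSamePadding(3, 8, kernel_size=5, stride=2),
             Conv2dStaticSamePadding(3, 8, kernel_size=5, stride=2, image_size=224)):
    assert conv(x).shape[-2:] == (math.ceil(224 / 2), math.ceil(224 / 2))  # (112, 112)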
|