class CrisprGenerator(Sequence):
def __init__(self, ref_store, out_store, samp_list, minproba=1):
self.ref_store = ref_store
self.out_store = out_store
self.samp_list = samp_list
self.minproba = minproba
def __getitem__(self, item):
samp_id = self.samp_list[item]
x_left = self.ref_store[samp_id]['x_left']
x_right = self.ref_store[samp_id]['x_right']
x_out = self.out_store[samp_id]['x_out']
proba = self.out_store[samp_id]['proba']
proba_inds = np.where((proba > self.minproba))
x_out = x_out[proba_inds]
proba = proba[proba_inds]
total = x_out.shape[0]
x_left = np.dstack(([x_left] * total))
x_right = np.dstack(([x_right] * total))
x_left = np.transpose(x_left, (2, 0, 1))
x_right = np.transpose(x_right, (2, 0, 1))
return ([x_left, x_right, x_out], (proba / 100.0))
def __len__(self):
return len(self.samp_list)
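
# Hedged usage sketch (not in the original source): minimal in-memory stores with the
# nested layout CrisprGenerator expects. Shapes and sample ids are illustrative only;
# `Sequence` above is assumed to be keras.utils.Sequence.
import numpy as np

ref_store = {'s1': {'x_left': np.random.rand(20, 4),
                    'x_right': np.random.rand(20, 4)}}
out_store = {'s1': {'x_out': np.random.rand(50, 60, 4),
                    'proba': np.random.uniform(0, 100, size=50)}}

gen = CrisprGenerator(ref_store, out_store, samp_list=['s1'], minproba=1)
(x_left, x_right, x_out), proba = gen[0]
print(x_left.shape, x_right.shape, x_out.shape, proba.shape)  # left/right contexts are tiled once per retained outcome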
|
def batched_pearson_loss(y_true, y_pred):
    """Custom loss function for computing negative Pearson correlation within
    each batch.

    Parameters
    ----------
    y_true : tf.Tensor
        ground truth; expected shape (None, 1).
    y_pred : tf.Tensor
        predicted values; expected shape (None, 1). `y_pred` is the linear (pre-softmax)
        output, and the correlation is computed *after* the softmax transformation.

    Returns
    -------
    tf.Tensor
        the computed loss: the negative Pearson correlation coefficient.

    Notes
    -----
    Be careful if `y_pred` is constant (e.g. all zeros); for now, 0/0 := 0.
    """
x = y_true
y = tf.exp(y_pred)
y = (y / tf.reduce_sum(y))
x_mean = tf.reduce_mean(x)
y_mean = tf.reduce_mean(y)
r_num = tf.reduce_sum(((x - x_mean) * (y - y_mean)))
r_denom_x = tf.sqrt(tf.reduce_sum(((x - x_mean) ** 2)))
r_denom_y = tf.sqrt(tf.reduce_sum(((y - y_mean) ** 2)))
r_denom = (r_denom_x * r_denom_y)
r = tf.math.divide_no_nan(r_num, r_denom)
loss = (r * (- 1))
return loss
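
# Hedged usage sketch (not in the original source): compiling a small Keras model with
# batched_pearson_loss. The loss normalizes y_pred with a softmax over the whole batch,
# so the model's final layer should stay linear, as the docstring notes.
import numpy as np
import tensorflow as tf
from tensorflow import keras

model = keras.Sequential([
    keras.layers.Dense(16, activation='relu', input_shape=(8,)),
    keras.layers.Dense(1),  # linear output; softmax is applied inside the loss
])
model.compile(optimizer='adam', loss=batched_pearson_loss)
x = np.random.rand(64, 8).astype('float32')
y = np.random.rand(64, 1).astype('float32')
model.fit(x, y, epochs=1, batch_size=16, verbose=0)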
|
def get_branch_ms(ns):
branch_ms = ModelSpace.from_dict([[{'Layer_type': 'conv1d', 'filters': 32, 'kernel_size': 12, 'activation': 'relu', 'name': ('%s_L1_relu12' % ns)}, {'Layer_type': 'conv1d', 'filters': 32, 'kernel_size': 12, 'activation': 'tanh', 'name': ('%s_L1_tanh12' % ns)}, {'Layer_type': 'conv1d', 'filters': 32, 'kernel_size': 12, 'activation': 'sigmoid', 'name': ('%s_L1_sigmoid12' % ns)}, {'Layer_type': 'conv1d', 'filters': 32, 'kernel_size': 2, 'activation': 'relu', 'name': ('%s_L1_relu2' % ns)}, {'Layer_type': 'conv1d', 'filters': 32, 'kernel_size': 2, 'activation': 'tanh', 'name': ('%s_L1_tanh2' % ns)}, {'Layer_type': 'conv1d', 'filters': 32, 'kernel_size': 2, 'activation': 'sigmoid', 'name': ('%s_L1_sigmoid2' % ns)}], [{'Layer_type': 'conv1d', 'filters': 32, 'kernel_size': 12, 'activation': 'relu', 'name': ('%s_L2_relu12' % ns)}, {'Layer_type': 'conv1d', 'filters': 32, 'kernel_size': 12, 'activation': 'tanh', 'name': ('%s_L2_tanh12' % ns)}, {'Layer_type': 'conv1d', 'filters': 32, 'kernel_size': 12, 'activation': 'sigmoid', 'name': ('%s_L2_sigmoid12' % ns)}, {'Layer_type': 'conv1d', 'filters': 32, 'kernel_size': 2, 'activation': 'relu', 'name': ('%s_L2_relu2' % ns)}, {'Layer_type': 'conv1d', 'filters': 32, 'kernel_size': 2, 'activation': 'tanh', 'name': ('%s_L2_tanh2' % ns)}, {'Layer_type': 'conv1d', 'filters': 32, 'kernel_size': 2, 'activation': 'sigmoid', 'name': ('%s_L2_sigmoid2' % ns)}, {'Layer_type': 'identity', 'name': ('%s_L2_id' % ns)}], [{'Layer_type': 'maxpool1d', 'pool_size': 2, 'strides': 2, 'name': ('%s_L3_maxp2' % ns)}, {'Layer_type': 'avgpool1d', 'pool_size': 2, 'strides': 2, 'name': ('%s_L3_avgp2' % ns)}, {'Layer_type': 'identity', 'name': ('%s_L3_id' % ns)}], [{'Layer_type': 'dropout', 'rate': 0.2, 'name': ('%s_L4_drop0.2' % ns)}, {'Layer_type': 'dropout', 'rate': 0.4, 'name': ('%s_L4_drop0.4' % ns)}, {'Layer_type': 'identity', 'name': ('%s_L4_id' % ns)}], [{'Layer_type': 'flatten', 'name': ('%s_L5_flat' % ns)}, {'Layer_type': 'globalavgpool1d', 'name': ('%s_L5_gbavg' % ns)}]])
return branch_ms
|
def get_model_space(out_filters=64, num_layers=9):
model_space = ModelSpace()
num_pool = 4
expand_layers = [((num_layers // 4) - 1), (((num_layers // 4) * 2) - 1), (((num_layers // 4) * 3) - 1)]
for i in range(num_layers):
model_space.add_layer(i, [Operation('conv1d', filters=out_filters, kernel_size=8, activation='relu'), Operation('conv1d', filters=out_filters, kernel_size=4, activation='relu'), Operation('conv1d', filters=out_filters, kernel_size=8, activation='relu', dilation=10), Operation('conv1d', filters=out_filters, kernel_size=4, activation='relu', dilation=10), Operation('maxpool1d', filters=out_filters, pool_size=4, strides=1), Operation('avgpool1d', filters=out_filters, pool_size=4, strides=1), Operation('identity', filters=out_filters)])
if (i in expand_layers):
out_filters *= 2
return model_space
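
# Hedged usage sketch (not in the original source): with the defaults this builds a
# 9-layer space with 7 candidate operations per layer, and the filter count doubles
# after layers 1, 3 and 5 (0-indexed), per expand_layers above.
ms = get_model_space(out_filters=64, num_layers=9)
print(len(ms))  # 9, assuming ModelSpace implements __len__ as in AMBER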
|
class Tokenizer(object):
def __init__(self, chars='abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:’"/|_#$%ˆ&*˜‘+=<>()[]{} ', unk_token=True):
self.chars = chars
        self.unk_token = (len(self.chars) + 1) if unk_token else None  # first index beyond the known vocabulary (indices start at 1)
self.build()
def build(self):
'Build up char2idx.\n '
self.idx = 1
self.char2idx = {}
self.idx2char = {}
for char in self.chars:
self.char2idx[char] = self.idx
self.idx2char[self.idx] = char
self.idx += 1
def char_to_idx(self, c):
'Return the integer character index of a character token.\n '
if (not (c in self.char2idx)):
if (self.unk_token is None):
return None
else:
return self.unk_token
return self.char2idx[c]
def idx_to_char(self, idx):
'Return the character string of an integer word index.\n '
if (idx > len(self.idx2char)):
if (self.unk_token is None):
return ''
else:
return '<UNK>'
elif (idx == 0):
return ''
return self.idx2char[idx]
def __len__(self):
'Return the length of the vocabulary.\n '
return len(self.char2idx)
def text_to_sequence(self, text, maxlen=1014):
text = text.lower()
data = np.zeros(maxlen).astype(int)
for i in range(len(text)):
if (i >= maxlen):
return data
if (text[i] in self.char2idx):
data[i] = self.char_to_idx(text[i])
return data
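
# Hedged usage sketch (not in the original source): character-level encoding with the
# Tokenizer above; indices start at 1, with 0 used as padding.
import numpy as np

tok = Tokenizer()
seq = tok.text_to_sequence('Hello, world!', maxlen=20)
print(seq)                      # integer indices, zero-padded to maxlen
print(tok.idx_to_char(seq[0]))  # 'h' -- input is lower-cased before lookup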
|
def utf8_to_sequence(text, maxlen=1014):
text = text.decode('utf-8')
text = text.lower()
data = np.zeros((maxlen, 1)).astype(int)
for i in range(len(text)):
if (i >= maxlen):
return data
data[i] = ord(text[i])
return data
|
def get_data_config_amber_encoded(fp, feat_name, batch_size, shuffle):
    """Prepare the kwargs for BatchedHDF5Generator.

    Note
    ----
    Only works for the layout of `data/zero_shot/amber_encoded.train_feats.train.h5`.
    """
d = {'hdf5_fp': fp, 'x_selector': Selector('x'), 'y_selector': Selector(('labels/%s' % feat_name)), 'batch_size': batch_size, 'shuffle': shuffle}
return d
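
# Hedged usage sketch (not in the original source): the returned dict is meant to be
# splatted into BatchedHDF5Generator, as the manager configs below do; the file path
# and feature name here are illustrative only.
d = get_data_config_amber_encoded(fp='./data/zero_shot/amber_encoded.train_feats.train.h5',
                                  feat_name='CTCF', batch_size=256, shuffle=True)
gen = BatchedHDF5Generator(**d)  # assumes BatchedHDF5Generator accepts these keyword arguments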
|
def get_data_config_deepsea_compiled(fp, feat_name, batch_size, shuffle):
'Equivalent for amber encoded but for deepsea 919 compiled hdf5\n '
meta = read_metadata()
d = {'hdf5_fp': fp, 'x_selector': Selector(label='x'), 'y_selector': Selector(label='y', index=meta.loc[feat_name].col_idx), 'batch_size': batch_size, 'shuffle': shuffle}
return d
|
def amber_app(wd, feat_name, run=False):
type_dict = {'controller_type': 'GeneralController', 'knowledge_fn_type': 'zero', 'reward_fn_type': 'LossAucReward', 'modeler_type': 'KerasModelBuilder', 'manager_type': 'DistributedManager', 'env_type': 'ControllerTrainEnv'}
train_data_kwargs = get_data_config_deepsea_compiled(fp='./data/zero_shot_deepsea/train.h5', feat_name=feat_name, batch_size=1024, shuffle=True)
validate_data_kwargs = get_data_config_deepsea_compiled(fp='./data/zero_shot_deepsea/val.h5', feat_name=feat_name, batch_size=1024, shuffle=False)
os.makedirs(wd, exist_ok=True)
input_node = [Operation('input', shape=(1000, 4), name='input')]
output_node = [Operation('dense', units=1, activation='sigmoid', name='output')]
model_compile_dict = {'loss': 'binary_crossentropy', 'optimizer': 'adam', 'metrics': ['acc']}
(model_space, layer_embedding_sharing) = get_model_space()
batch_size = 1024
use_ppo = False
specs = {'model_space': model_space, 'controller': {'share_embedding': layer_embedding_sharing, 'with_skip_connection': False, 'skip_weight': None, 'lstm_size': 128, 'lstm_num_layers': 1, 'kl_threshold': 0.1, 'train_pi_iter': 100, 'optim_algo': 'adam', 'rescale_advantage_by_reward': False, 'temperature': 1.0, 'tanh_constant': 1.5, 'buffer_size': 10, 'batch_size': 5, 'use_ppo_loss': use_ppo}, 'model_builder': {'batch_size': batch_size, 'inputs_op': input_node, 'outputs_op': output_node, 'model_compile_dict': model_compile_dict}, 'knowledge_fn': {'data': None, 'params': {}}, 'reward_fn': {'method': 'auc'}, 'manager': {'data': {'train_data': BatchedHDF5Generator, 'validation_data': BatchedHDF5Generator}, 'params': {'train_data_kwargs': train_data_kwargs, 'validate_data_kwargs': validate_data_kwargs, 'devices': ['/device:GPU:0'], 'epochs': 100, 'fit_kwargs': {'earlystop_patience': 40, 'steps_per_epoch': 100, 'max_queue_size': 50, 'workers': 3}, 'child_batchsize': batch_size, 'store_fn': 'model_plot', 'working_dir': wd, 'verbose': 0}}, 'train_env': {'max_episode': 75, 'max_step_per_ep': 5, 'working_dir': wd, 'time_budget': '24:00:00', 'with_skip_connection': False, 'save_controller_every': 1}}
amb = Amber(types=type_dict, specs=specs)
if run:
amb.run()
return amb
|
class RocCallback(keras.callbacks.Callback):
def __init__(self, training_data, validation_data):
self.x = training_data[0]
self.y = training_data[1]
self.x_val = validation_data[0]
self.y_val = validation_data[1]
def on_train_begin(self, logs={}):
return
def on_train_end(self, logs={}):
return
def on_epoch_begin(self, epoch, logs={}):
return
def on_epoch_end(self, epoch, logs={}):
y_pred_train = self.model.predict(self.x)
roc_train = roc_auc_score(self.y, y_pred_train)
y_pred_val = self.model.predict(self.x_val)
roc_val = roc_auc_score(self.y_val, y_pred_val)
print(('\rroc-auc_train: %s - roc-auc_val: %s' % (str(round(roc_train, 4)), str(round(roc_val, 4)))), end=((100 * ' ') + '\n'))
return
def on_batch_begin(self, batch, logs={}):
return
def on_batch_end(self, batch, logs={}):
return
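
# Hedged usage sketch (not in the original source): wiring RocCallback into a toy binary
# classifier; assumes sklearn's roc_auc_score is imported, as the callback uses it.
import numpy as np
from sklearn.metrics import roc_auc_score
from tensorflow import keras

x_tr, y_tr = np.random.rand(64, 10), np.random.randint(0, 2, size=(64, 1))
x_va, y_va = np.random.rand(32, 10), np.random.randint(0, 2, size=(32, 1))
clf = keras.Sequential([keras.layers.Dense(1, activation='sigmoid', input_shape=(10,))])
clf.compile(optimizer='adam', loss='binary_crossentropy')
clf.fit(x_tr, y_tr, epochs=2, verbose=0,
        callbacks=[RocCallback(training_data=(x_tr, y_tr), validation_data=(x_va, y_va))])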
|
class Metrics(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self._data = []
def on_epoch_end(self, batch, logs={}):
(X_val, y_val) = (self.validation_data[0], self.validation_data[1])
        y_predict = np.asarray(self.model.predict(X_val))
y_val = np.argmax(y_val, axis=1)
y_predict = np.argmax(y_predict, axis=1)
        self._data.append({'val_recall': recall_score(y_val, y_predict, average='macro'), 'val_precision': precision_score(y_val, y_predict, average='macro')})
return
def get_data(self):
return self._data
|
def sigmoid(z):
return (1 / (1 + np.exp((- z))))
|
def get_flops():
run_meta = tf.RunMetadata()
opts = tf.profiler.ProfileOptionBuilder.float_operation()
flops = tf.profiler.profile(graph=K.get_session().graph, run_meta=run_meta, cmd='op', options=opts)
return flops.total_float_ops
|
def main():
args = sys.argv[1:]
wd = '.'
if (args[0] == 'ECG'):
input_node = Operation('input', shape=(1000, 1), name='input')
output_node = Operation('dense', units=4, activation='softmax')
(X_train, Y_train, X_test, Y_test, pid_test) = read_data_physionet_4(wd)
Y_train = to_categorical(Y_train, num_classes=4)
Y_test = to_categorical(Y_test, num_classes=4)
bs = 512
loss = 'categorical_crossentropy'
elif (args[0] == 'satellite'):
input_node = Operation('input', shape=(46, 1), name='input')
output_node = Operation('dense', units=24, activation='softmax')
(X_train, Y_train, X_test, Y_test) = load_satellite_data(wd, False)
bs = 1024
loss = 'categorical_crossentropy'
elif (args[0] == 'deepsea'):
input_node = Operation('input', shape=(1000, 4), name='input')
output_node = Operation('dense', units=36, activation='sigmoid')
(train, test) = load_deepsea_data(wd, False)
(X_train, Y_train) = train
(X_test, Y_test) = test
bs = 128
loss = 'binary_crossentropy'
elif (args[0] == 'dbpedia'):
CHAR_MAX_LEN = 1014
input_node = Operation('input', shape=(CHAR_MAX_LEN, 1), name='input')
output_node = Operation('dense', units=14, activation='softmax')
(x, y, alphabet_size) = build_char_dataset('train', CHAR_MAX_LEN)
(x, y) = (np.expand_dims(np.array(x), axis=2), to_categorical(np.array(y), num_classes=14))
(X_train, X_test, Y_train, Y_test) = train_test_split(x, y, test_size=0.15)
bs = 64
loss = 'categorical_crossentropy'
else:
raise NotImplementedError
metrics = (Metrics() if (not (args[0] == 'deepsea')) else RocCallback(training_data=(X_train, Y_train), validation_data=(X_test, Y_test)))
history_dir = os.path.join(wd, './outputs/AmberDBPedia')
hist = read_history([os.path.join(history_dir, 'train_history.csv')], metric_name_dict={'zero': 0, 'auc': 1})
hist = hist.sort_values(by='auc', ascending=False)
hist.head(n=5)
model_space = get_model_space(out_filters=32, num_layers=12)
keras_builder = KerasResidualCnnBuilder(inputs_op=input_node, output_op=output_node, fc_units=100, flatten_mode='Flatten', model_compile_dict={'loss': loss, 'optimizer': 'adam', 'metrics': ['acc']}, model_space=model_space, dropout_rate=0.1, wsf=2)
best_arc = hist.iloc[0][[x for x in hist.columns if x.startswith('L')]].tolist()
searched_mod = keras_builder(best_arc)
searched_mod.summary()
print(get_flops())
history = searched_mod.fit(X_train, Y_train, batch_size=bs, validation_data=(X_test, Y_test), epochs=100, verbose=1, callbacks=[metrics])
if (args[0] == 'ECG'):
all_pred_prob = []
for item in X_test:
item = np.expand_dims(item, 0)
logits = searched_mod.predict(item)
all_pred_prob.append(logits)
all_pred_prob = np.concatenate(all_pred_prob)
all_pred = np.argmax(all_pred_prob, axis=1)
Y_test = np.argmax(Y_test, axis=1)
        final = f1_score(Y_test, all_pred, pid_test)  # signature below is (y_true, y_pred, pid_test)
        print(final)
        np.savetxt('score.txt', np.array([final]))
elif (args[0] == 'deepsea'):
test_predictions = []
        for item in X_test:
            item = np.expand_dims(item, 0)
            logits = searched_mod.predict(item)
logits_sigmoid = sigmoid(logits)
test_predictions.append(logits_sigmoid)
test_predictions = np.concatenate(test_predictions).astype(np.float32)
test_gts = Y_test.astype(np.int32)
stats = calculate_stats(test_predictions, test_gts)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
print(mAP)
print(mAUC)
np.savetxt('stats.txt', np.array([mAP, mAUC]))
with open('metrics.txt', 'w') as file:
file.write(json.dumps(history.history))
|
def download_dbpedia():
dbpedia_url = 'https://github.com/le-scientifique/torchDatasets/raw/master/dbpedia_csv.tar.gz'
wget.download(dbpedia_url)
with tarfile.open('dbpedia_csv.tar.gz', 'r:gz') as tar:
tar.extractall()
|
def clean_str(text):
text = re.sub('[^A-Za-z0-9(),!?\\\'\\`\\"]', ' ', text)
text = re.sub('\\s{2,}', ' ', text)
text = text.strip().lower()
return text
|
def build_char_dataset(step, document_max_len):
alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:’\'"/|_#$%ˆ&*˜‘+=<>()[]{} '
if (step == 'train'):
df = pd.read_csv(TRAIN_PATH, names=['class', 'title', 'content'])
else:
df = pd.read_csv(TEST_PATH, names=['class', 'title', 'content'])
df = df.sample(frac=1)
char_dict = dict()
char_dict['<pad>'] = 0
char_dict['<unk>'] = 1
for c in alphabet:
char_dict[c] = len(char_dict)
alphabet_size = (len(alphabet) + 2)
x = list(map((lambda content: list(map((lambda d: char_dict.get(d, char_dict['<unk>'])), content.lower()))), df['content']))
x = list(map((lambda d: d[:document_max_len]), x))
x = list(map((lambda d: (d + ((document_max_len - len(d)) * [char_dict['<pad>']]))), x))
y = list(map((lambda d: (d - 1)), list(df['class'])))
return (x, y, alphabet_size)
|
def read_data_physionet_4_with_val(path, window_size=1000, stride=500):
with open(os.path.join(path, 'challenge2017.pkl'), 'rb') as fin:
res = pickle.load(fin)
all_data = res['data']
for i in range(len(all_data)):
tmp_data = all_data[i]
tmp_std = np.std(tmp_data)
tmp_mean = np.mean(tmp_data)
all_data[i] = ((tmp_data - tmp_mean) / tmp_std)
all_label = []
for i in res['label']:
if (i == 'N'):
all_label.append(0)
elif (i == 'A'):
all_label.append(1)
elif (i == 'O'):
all_label.append(2)
elif (i == '~'):
all_label.append(3)
all_label = np.array(all_label)
(X_train, X_test, Y_train, Y_test) = train_test_split(all_data, all_label, test_size=0.2, random_state=0)
(X_val, X_test, Y_val, Y_test) = train_test_split(X_test, Y_test, test_size=0.5, random_state=0)
print('before: ')
(X_train, Y_train) = slide_and_cut(X_train, Y_train, window_size=window_size, stride=stride)
(X_val, Y_val, pid_val) = slide_and_cut(X_val, Y_val, window_size=window_size, stride=stride, output_pid=True)
(X_test, Y_test, pid_test) = slide_and_cut(X_test, Y_test, window_size=window_size, stride=stride, output_pid=True)
print('after: ')
print(Counter(Y_train), Counter(Y_val), Counter(Y_test))
shuffle_pid = np.random.permutation(Y_train.shape[0])
X_train = X_train[shuffle_pid]
Y_train = Y_train[shuffle_pid]
X_train = np.expand_dims(X_train, 2)
X_val = np.expand_dims(X_val, 2)
X_test = np.expand_dims(X_test, 2)
return (X_train, Y_train, X_val, Y_val)
|
def read_data_physionet_4(path, window_size=1000, stride=500):
with open(os.path.join(path, 'challenge2017.pkl'), 'rb') as fin:
res = pickle.load(fin)
all_data = res['data']
for i in range(len(all_data)):
tmp_data = all_data[i]
tmp_std = np.std(tmp_data)
tmp_mean = np.mean(tmp_data)
all_data[i] = ((tmp_data - tmp_mean) / tmp_std)
all_label = []
for i in res['label']:
if (i == 'N'):
all_label.append(0)
elif (i == 'A'):
all_label.append(1)
elif (i == 'O'):
all_label.append(2)
elif (i == '~'):
all_label.append(3)
all_label = np.array(all_label)
(X_train, X_test, Y_train, Y_test) = train_test_split(all_data, all_label, test_size=0.1, random_state=0)
print('before: ')
print(Counter(Y_train), Counter(Y_test))
(X_train, Y_train) = slide_and_cut(X_train, Y_train, window_size=window_size, stride=stride)
(X_test, Y_test, pid_test) = slide_and_cut(X_test, Y_test, window_size=window_size, stride=stride, output_pid=True)
print('after: ')
print(Counter(Y_train), Counter(Y_test))
shuffle_pid = np.random.permutation(Y_train.shape[0])
X_train = X_train[shuffle_pid]
Y_train = Y_train[shuffle_pid]
X_train = np.expand_dims(X_train, 2)
X_test = np.expand_dims(X_test, 2)
return (X_train, Y_train, X_test, Y_test, pid_test)
|
def slide_and_cut(X, Y, window_size, stride, output_pid=False, datatype=4):
out_X = []
out_Y = []
out_pid = []
n_sample = X.shape[0]
mode = 0
for i in range(n_sample):
tmp_ts = X[i]
tmp_Y = Y[i]
if (tmp_Y == 0):
i_stride = stride
elif (tmp_Y == 1):
if (datatype == 4):
i_stride = (stride // 6)
elif (datatype == 2):
i_stride = (stride // 10)
elif (datatype == 2.1):
i_stride = (stride // 7)
elif (tmp_Y == 2):
i_stride = (stride // 2)
elif (tmp_Y == 3):
i_stride = (stride // 20)
for j in range(0, (len(tmp_ts) - window_size), i_stride):
out_X.append(tmp_ts[j:(j + window_size)])
out_Y.append(tmp_Y)
out_pid.append(i)
if output_pid:
return (np.array(out_X), np.array(out_Y), np.array(out_pid))
else:
return (np.array(out_X), np.array(out_Y))
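
# Hedged usage sketch (not in the original source): sliding windows over two toy records.
# Minority classes get a smaller stride (denser oversampling), per the logic above.
import numpy as np

X = np.array([np.arange(3000), np.arange(3000)])
Y = np.array([0, 3])
out_X, out_Y, out_pid = slide_and_cut(X, Y, window_size=1000, stride=500, output_pid=True)
print(out_X.shape)         # (84, 1000)
print(np.bincount(out_Y))  # [ 4  0  0 80]: class 3 is windowed 20x more densely than class 0
print(np.unique(out_pid))  # [0 1]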
|
def f1_score(y_true, y_pred, pid_test):
final_pred = []
final_gt = []
for i_pid in np.unique(pid_test):
tmp_pred = y_pred[(pid_test == i_pid)]
tmp_gt = y_true[(pid_test == i_pid)]
final_pred.append(Counter(tmp_pred).most_common(1)[0][0])
final_gt.append(Counter(tmp_gt).most_common(1)[0][0])
tmp_report = classification_report(final_gt, final_pred, output_dict=True)
print(confusion_matrix(final_gt, final_pred))
f1_score = ((((tmp_report['0']['f1-score'] + tmp_report['1']['f1-score']) + tmp_report['2']['f1-score']) + tmp_report['3']['f1-score']) / 4)
return f1_score
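
# Hedged usage sketch (not in the original source): majority vote within each record id,
# then macro-F1 over the four classes. The imports are the ones the function body relies on.
import numpy as np
from collections import Counter
from sklearn.metrics import classification_report, confusion_matrix

y_true = np.repeat([0, 1, 2, 3], 3)
y_pred = np.array([0, 0, 1, 1, 1, 1, 2, 2, 0, 3, 3, 3])
pid = np.repeat([10, 11, 12, 13], 3)
print(f1_score(y_true, y_pred, pid))  # 1.0: every record's majority vote matches its label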
|
def custom_f1(y_true, y_pred):
def recall_m(y_true, y_pred):
TP = K.sum(K.round(K.clip((y_true * y_pred), 0, 1)))
Positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = (TP / (Positives + K.epsilon()))
return recall
def precision_m(y_true, y_pred):
TP = K.sum(K.round(K.clip((y_true * y_pred), 0, 1)))
Pred_Positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = (TP / (Pred_Positives + K.epsilon()))
return precision
(precision, recall) = (precision_m(y_true, y_pred), recall_m(y_true, y_pred))
return (2 * ((precision * recall) / ((precision + recall) + K.epsilon())))
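
# Hedged usage sketch (not in the original source): custom_f1 as a Keras metric for a binary
# classifier; assumes K is tensorflow.keras.backend, which the function body uses.
from tensorflow import keras
from tensorflow.keras import backend as K

clf = keras.Sequential([keras.layers.Dense(1, activation='sigmoid', input_shape=(10,))])
clf.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc', custom_f1])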
|
def load_satellite_data(path, train):
train_file = os.path.join(path, 'satellite_train.npy')
test_file = os.path.join(path, 'satellite_test.npy')
    train_bundle = np.load(train_file, allow_pickle=True)[()]
    test_bundle = np.load(test_file, allow_pickle=True)[()]
    (all_train_data, all_train_labels) = (train_bundle['data'], train_bundle['label'])
    (test_data, test_labels) = (test_bundle['data'], test_bundle['label'])
all_train_labels = (all_train_labels - 1)
test_labels = (test_labels - 1)
all_train_labels = to_categorical(all_train_labels, num_classes=24)
test_labels = to_categorical(test_labels, num_classes=24)
all_train_data = ((all_train_data - all_train_data.mean(axis=1, keepdims=True)) / all_train_data.std(axis=1, keepdims=True))
test_data = ((test_data - test_data.mean(axis=1, keepdims=True)) / test_data.std(axis=1, keepdims=True))
all_train_data = np.expand_dims(all_train_data, 2)
test_data = np.expand_dims(test_data, 2)
if train:
len_val = len(test_data)
train_data = all_train_data[:(- len_val)]
train_labels = all_train_labels[:(- len_val)]
(val_data, val_labels) = (all_train_data[(- len_val):], all_train_labels[(- len_val):])
return (train_data, train_labels, val_data, val_labels)
return (all_train_data, all_train_labels, test_data, test_labels)
|
def get_controller(state_space, sess):
    """Test function for building a controller network. A controller is an LSTM cell that predicts the
    next layer given the previous layer and all previous layers (as stored in the hidden cell states).
    The controller model is trained by policy gradients, as in reinforcement learning.
    """
with tf.device('/cpu:0'):
controller = GeneralController(model_space=state_space, lstm_size=32, lstm_num_layers=1, with_skip_connection=False, kl_threshold=0.1, train_pi_iter=100, optim_algo='adam', buffer_size=5, batch_size=5, session=sess, use_ppo_loss=True)
return controller
|
def get_mock_manager(history_fn_list, Lambda=1.0, wd='./tmp_mock'):
    """Test function for building a mock manager. A mock manager
    returns a loss and knowledge instantly based on previous
    training history.

    Args:
        history_fn_list: a list of file paths to previous training-history CSV files.
    """
manager = MockManager(history_fn_list=history_fn_list, model_compile_dict={'loss': 'binary_crossentropy', 'optimizer': 'adam', 'metrics': ['acc']}, working_dir=wd, Lambda=Lambda, verbose=0)
return manager
|
def get_environment(controller, manager, should_plot, logger=None, wd='./tmp_mock/'):
    """Test function for getting a training environment for the controller.

    Args:
        controller: a built controller network.
        manager: a manager is a function that manages child networks; it is built upon `model_fn` and `reward_fn`.
        max_trials: maximum number of child networks generated.
        working_dir: the working directory where all files will be generated.
        resume_prev_run: restore the controller parameters and states from a previous run.
        logger: a logging file handler; creates a new logger if None.
    """
env = ControllerTrainEnvironment(controller, manager, max_episode=50, max_step_per_ep=3, logger=logger, resume_prev_run=False, should_plot=should_plot, working_dir=wd, with_skip_connection=False, with_input_blocks=False)
return env
|
def train_simple_controller(should_plot=False, logger=None, Lambda=1.0, wd='./outputs/mock_nas/'):
sess = tf.Session()
state_space = get_state_space()
hist_file_list = [('./data/mock_black_box/tmp_%i/train_history.csv' % i) for i in range(1, 21)]
manager = get_mock_manager(hist_file_list, Lambda=Lambda, wd=wd)
controller = get_controller(state_space, sess)
env = get_environment(controller, manager, should_plot, logger, wd=wd)
env.train()
|
def num_of_val_pos(wd):
managers = [x for x in os.listdir(wd) if x.startswith('manager')]
manager_pos_cnt = {}
for m in managers:
trials = os.listdir(os.path.join(wd, m, 'weights'))
pred = pd.read_table(os.path.join(wd, m, 'weights', trials[0], 'pred.txt'), comment='#')
manager_pos_cnt[m] = pred['obs'].sum()
return manager_pos_cnt
|
def plot_zs_hist(hist_fp, config_fp, save_prefix, zoom_first_n=None):
zs_hist = pd.read_table(hist_fp, header=None, sep=',')
configs = pd.read_table(config_fp)
zs_hist['task'] = zs_hist[0].apply((lambda x: ('Manager:%i' % int(x.split('-')[0]))))
zs_hist['task_int'] = zs_hist[0].apply((lambda x: int(x.split('-')[0])))
zs_hist['trial'] = zs_hist[0].apply((lambda x: int(x.split('-')[1])))
zs_hist['step'] = (zs_hist['trial'] // 5)
zs_hist = zs_hist[[2, 'task', 'step', 'task_int']].groupby(['task', 'step']).mean()
zs_hist['auc'] = zs_hist[2]
zs_hist = zs_hist.drop([2], axis=1)
zs_hist['task'] = [a[0] for a in zs_hist.index]
zs_hist['step'] = [a[1] for a in zs_hist.index]
zs_hist['task_int'] = (zs_hist['task_int'] // 10)
for i in zs_hist['task_int'].unique():
zs_hist_ = zs_hist.loc[(zs_hist.task_int == i)]
        if (zoom_first_n is not None):
            # 'trial' was aggregated away by the groupby above; filter on the derived 'step' instead
            zs_hist_ = zs_hist_.loc[(zs_hist_['step'] <= (zoom_first_n // 5))]
(fig, ax) = plt.subplots(1, 1, figsize=(10, 10))
sns.lineplot(x='step', y='auc', hue='task', data=zs_hist_, ax=ax, marker='o')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
plt.savefig(('%s.%i.png' % (save_prefix, i)))
plt.close()
|
def plot_single_run(feat_dirs, save_fp, zoom_first_n=None):
dfs = []
for d in feat_dirs:
hist = pd.read_table(os.path.join(d, 'train_history.csv'), header=None, sep=',')
hist['task'] = os.path.basename(d)
hist['trial'] = hist[0]
hist['auc'] = sma(hist[2], window=20)
hist = hist.drop([0, 1, 2, 3, 4, 5, 6], axis=1)
if (zoom_first_n is not None):
hist = hist.loc[(hist.trial <= zoom_first_n)]
dfs.append(hist)
df = pd.concat(dfs)
ax = sns.lineplot(x='trial', y='auc', hue='task', data=df)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
plt.savefig(save_fp)
plt.close()
|
def get_controller(model_space, session, data_description_len=3, layer_embedding_sharing=None):
with tf.device('/cpu:0'):
controller = ZeroShotController(data_description_config={'length': data_description_len}, share_embedding=layer_embedding_sharing, model_space=model_space, session=session, with_skip_connection=False, skip_weight=None, lstm_size=64, lstm_num_layers=1, kl_threshold=0.01, train_pi_iter=100, optim_algo='adam', temperature=1.0, tanh_constant=1.5, buffer_type='MultiManager', buffer_size=10, batch_size=5, use_ppo_loss=False, rescale_advantage_by_reward=True)
return controller
|
def get_manager_distributed(train_data, val_data, controller, model_space, wd, data_description, verbose=0, devices=None, train_data_kwargs=None, validate_data_kwargs=None, **kwargs):
reward_fn = LossAucReward(method='auc')
input_node = State('input', shape=(1000, 4), name='input', dtype='float32')
output_node = State('dense', units=1, activation='sigmoid')
model_compile_dict = {'loss': 'binary_crossentropy', 'optimizer': 'adam', 'metrics': ['acc']}
mb = KerasModelBuilder(inputs=input_node, outputs=output_node, model_compile_dict=model_compile_dict, model_space=model_space)
manager = DistributedGeneralManager(devices=devices, train_data_kwargs=train_data_kwargs, train_data=train_data, validate_data_kwargs=validate_data_kwargs, validation_data=val_data, epochs=30, child_batchsize=1000, reward_fn=reward_fn, model_fn=mb, store_fn='model_plot', model_compile_dict=model_compile_dict, working_dir=wd, verbose=verbose, save_full_model=False, model_space=model_space, fit_kwargs={'steps_per_epoch': 50, 'workers': 3, 'max_queue_size': 50, 'earlystop_patience': 10})
return manager
|
def get_manager_common(train_data, val_data, controller, model_space, wd, data_description, verbose=2, n_feats=1, **kwargs):
input_node = State('input', shape=(1000, 4), name='input', dtype='float32')
output_node = State('dense', units=n_feats, activation='sigmoid')
model_compile_dict = {'loss': 'binary_crossentropy', 'optimizer': 'adam', 'metrics': ['acc']}
session = controller.session
reward_fn = LossAucReward(method='auc')
gpus = get_available_gpus()
num_gpus = len(gpus)
mb = KerasModelBuilder(inputs=input_node, outputs=output_node, model_compile_dict=model_compile_dict, model_space=model_space, gpus=num_gpus)
child_batch_size = (1000 * num_gpus)
manager = GeneralManager(train_data=train_data, validation_data=val_data, epochs=1000, child_batchsize=child_batch_size, reward_fn=reward_fn, model_fn=mb, store_fn='model_plot', model_compile_dict=model_compile_dict, working_dir=wd, verbose=verbose, save_full_model=True, model_space=model_space, fit_kwargs={'steps_per_epoch': 50, 'workers': 8, 'max_queue_size': 50, 'earlystop_patience': 20})
return manager
|
def read_configs(arg):
dfeature_names = list()
with open(arg.dfeature_name_file, 'r') as read_file:
for line in read_file:
line = line.strip()
if line:
dfeature_names.append(line)
wd = arg.wd
(model_space, layer_embedding_sharing) = get_model_space_common()
print(layer_embedding_sharing)
try:
session = tf.Session()
except AttributeError:
session = tf.compat.v1.Session()
controller = get_controller(model_space=model_space, session=session, data_description_len=len(dfeature_names), layer_embedding_sharing=layer_embedding_sharing)
if arg.config_file.endswith('tsv'):
sep = '\t'
else:
sep = ','
configs = pd.read_csv(arg.config_file, sep=sep)
tmp = list(configs.columns)
if any([('"' in x) for x in tmp]):
configs = pd.read_csv(arg.config_file, sep=sep, quoting=2)
print('Re-read with quoting')
configs = configs.to_dict(orient='index')
gpus = get_available_gpus()
if (len(gpus) == 0):
gpus = [None]
gpus_ = (gpus * len(configs))
manager_getter = get_manager_distributed
config_keys = list()
seed_generator = np.random.RandomState(seed=1337)
for (i, k) in enumerate(configs.keys()):
for x in ['train', 'validate']:
if ((arg.lockstep_sampling is False) and (x == 'train')):
cur_seed = seed_generator.randint(0, np.iinfo(np.uint32).max)
else:
cur_seed = 1337
d = {'hdf5_fp': (arg.train_file if (x == 'train') else arg.val_file), 'x_selector': Selector(label='x'), 'y_selector': Selector(label=('labels/%s' % configs[k]['feat_name'])), 'batch_size': 1024, 'shuffle': (x == 'train')}
configs[k][x] = BatchedHDF5Generator
configs[k][('%s_data_kwargs' % x)] = d
configs[k]['dfeatures'] = np.array([configs[k][x] for x in dfeature_names])
tmp = dict(train_data_kwargs=configs[k]['train_data_kwargs'], validate_data_kwargs=configs[k]['validate_data_kwargs'])
configs[k]['manager'] = manager_getter(devices=[gpus_[i]], train_data=configs[k]['train'], val_data=configs[k]['validate'], controller=controller, model_space=model_space, wd=os.path.join(wd, ('manager_%s' % k)), data_description=configs[k]['dfeatures'], dag_name='AmberDAG{}'.format(k), verbose=0, n_feats=configs[k]['n_feats'], **tmp)
config_keys.append(k)
return (configs, config_keys, controller, model_space)
|
def train_nas(arg):
wd = arg.wd
logger = setup_logger(wd, verbose_level=logging.INFO)
gpus = get_available_gpus()
(configs, config_keys, controller, model_space) = read_configs(arg)
tmp = dict(data_descriptive_features=np.stack([configs[k]['dfeatures'] for k in config_keys]), controller=controller, manager=[configs[k]['manager'] for k in config_keys], logger=logger, max_episode=200, max_step_per_ep=5, working_dir=wd, time_budget='150:00:00', with_input_blocks=False, with_skip_connection=False, save_controller_every=1, enable_manager_sampling=True)
env = ParallelMultiManagerEnvironment(processes=(len(gpus) if (len(gpus) > 1) else 1), **tmp)
try:
env.train()
except KeyboardInterrupt:
print('user interrupted training')
pass
controller.save_weights(os.path.join(wd, 'controller_weights.h5'))
|
def get_controller(model_space, session, data_description_len=3, layer_embedding_sharing=None, use_ppo_loss=False, is_training=True):
with tf.device('/cpu:0'):
controller = ZeroShotController(data_description_config={'length': data_description_len, 'hidden_layer': {'units': 16, 'activation': 'relu'}, 'regularizer': {'l1': 1e-08}}, share_embedding=layer_embedding_sharing, model_space=model_space, session=session, with_skip_connection=False, skip_weight=None, lstm_size=128, lstm_num_layers=1, kl_threshold=0.1, train_pi_iter=(100 if (use_ppo_loss is True) else 20), optim_algo='adam', temperature=(1.0 if (is_training is True) else 0.5), tanh_constant=(1.5 if (is_training is True) else None), buffer_type='MultiManager', buffer_size=10, batch_size=10, use_ppo_loss=use_ppo_loss, rescale_advantage_by_reward=True)
return controller
|
def get_manager_distributed(train_data, val_data, controller, model_space, wd, data_description, verbose=0, devices=None, train_data_kwargs=None, validate_data_kwargs=None, **kwargs):
reward_fn = LossAucReward(method='auc')
input_node = State('input', shape=(1000, 4), name='input', dtype='float32')
output_node = State('dense', units=1, activation='sigmoid')
model_compile_dict = {'loss': 'binary_crossentropy', 'optimizer': 'adam', 'metrics': ['acc']}
mb = KerasModelBuilder(inputs=input_node, outputs=output_node, model_compile_dict=model_compile_dict, model_space=model_space)
manager = DistributedGeneralManager(devices=devices, train_data_kwargs=train_data_kwargs, train_data=train_data, validate_data_kwargs=validate_data_kwargs, validation_data=val_data, epochs=100, child_batchsize=1000, reward_fn=reward_fn, model_fn=mb, store_fn='model_plot', model_compile_dict=model_compile_dict, working_dir=wd, verbose=verbose, save_full_model=False, model_space=model_space, fit_kwargs={'steps_per_epoch': 100, 'workers': 3, 'max_queue_size': 50, 'earlystop_patience': 10})
return manager
|
def read_configs(arg, is_training=True):
meta = read_metadata()
dfeature_names = list()
with open(arg.dfeature_name_file, 'r') as read_file:
for line in read_file:
line = line.strip()
if line:
dfeature_names.append(line)
wd = arg.wd
model_spaces_mapper = {'simple': get_model_space_simple, 'long': get_model_space_long, 'long_and_dilation': get_model_space_long_and_dilation}
(model_space, layer_embedding_sharing) = model_spaces_mapper[arg.model_space]()
print(layer_embedding_sharing)
try:
session = tf.Session()
except AttributeError:
session = tf.compat.v1.Session()
controller = get_controller(model_space=model_space, session=session, data_description_len=len(dfeature_names), layer_embedding_sharing=layer_embedding_sharing, use_ppo_loss=arg.ppo, is_training=is_training)
if arg.config_file.endswith('tsv'):
sep = '\t'
else:
sep = ','
configs = pd.read_csv(arg.config_file, sep=sep)
tmp = list(configs.columns)
if any([('"' in x) for x in tmp]):
configs = pd.read_csv(arg.config_file, sep=sep, quoting=2)
print('Re-read with quoting')
configs = configs.to_dict(orient='index')
gpus = get_available_gpus()
if (len(gpus) == 0):
gpus = [None]
gpus_ = (gpus * len(configs))
manager_getter = get_manager_distributed
config_keys = list()
seed_generator = np.random.RandomState(seed=1337)
for (i, k) in enumerate(configs.keys()):
for x in ['train', 'validate']:
if ((arg.lockstep_sampling is False) and (x == 'train')):
cur_seed = seed_generator.randint(0, np.iinfo(np.uint32).max)
else:
cur_seed = 1337
d = {'hdf5_fp': (arg.train_file if (x == 'train') else arg.val_file), 'x_selector': Selector(label='x'), 'y_selector': Selector(label='y', index=meta.loc[configs[k]['feat_name']].col_idx), 'batch_size': 1024, 'shuffle': (x == 'train')}
configs[k][x] = BatchedHDF5Generator
configs[k][('%s_data_kwargs' % x)] = d
configs[k]['dfeatures'] = np.array([configs[k][x] for x in dfeature_names])
tmp = dict(train_data_kwargs=configs[k]['train_data_kwargs'], validate_data_kwargs=configs[k]['validate_data_kwargs'])
configs[k]['manager'] = manager_getter(devices=[gpus_[i]], train_data=configs[k]['train'], val_data=configs[k]['validate'], controller=controller, model_space=model_space, wd=os.path.join(wd, ('manager_%s' % k)), data_description=configs[k]['dfeatures'], dag_name='AmberDAG{}'.format(k), verbose=0, n_feats=configs[k]['n_feats'], **tmp)
config_keys.append(k)
return (configs, config_keys, controller, model_space)
|
def train_nas(arg):
wd = arg.wd
logger = setup_logger(wd, verbose_level=logging.INFO)
gpus = get_available_gpus()
(configs, config_keys, controller, model_space) = read_configs(arg)
tmp = dict(data_descriptive_features=np.stack([configs[k]['dfeatures'] for k in config_keys]), controller=controller, manager=[configs[k]['manager'] for k in config_keys], logger=logger, max_episode=350, max_step_per_ep=5, working_dir=wd, time_budget='96:00:00', with_input_blocks=False, with_skip_connection=False, save_controller_every=1, enable_manager_sampling=True)
env = ParallelMultiManagerEnvironment(processes=(len(gpus) if (len(gpus) > 1) else 1), **tmp)
try:
env.train()
except KeyboardInterrupt:
print('user interrupted training')
pass
controller.save_weights(os.path.join(wd, 'controller_weights.h5'))
|
def get_manager(train_data, val_data, controller, model_space, wd, motif_name, verbose=2, **kwargs):
input_node = State('input', shape=(1000, 4), name='input', dtype='float32')
output_node = State('dense', units=1, activation='sigmoid')
model_compile_dict = {'loss': 'binary_crossentropy', 'optimizer': 'adam', 'metrics': ['acc']}
knowledge_fn = MotifKLDivergence(temperature=0.1, Lambda_regularizer=0.0)
knowledge_fn.knowledge_encoder(motif_name_list=[motif_name], motif_file='/mnt/home/zzhang/workspace/src/BioNAS/BioNAS/resources/rbp_motif/encode_motifs.txt.gz', is_log_motif=False)
reward_fn = LossAucReward(method='auc', knowledge_function=knowledge_fn)
child_batch_size = 500
model_fn = KerasModelBuilder(inputs=input_node, outputs=output_node, model_compile_dict=model_compile_dict, model_space=model_space)
manager = GeneralManager(train_data=train_data, validation_data=val_data, epochs=200, child_batchsize=child_batch_size, reward_fn=reward_fn, model_fn=model_fn, store_fn='model_plot', model_compile_dict=model_compile_dict, working_dir=wd, verbose=0, save_full_model=True, model_space=model_space)
return manager
|
def main(arg):
model_space = get_model_space()
(dataset1, dataset2) = read_data()
if (arg.dataset == 1):
(train_data, validation_data) = (dataset1['train'], dataset1['val'])
elif (arg.dataset == 2):
(train_data, validation_data) = (dataset2['train'], dataset2['val'])
else:
raise Exception(('Unknown dataset: %s' % arg.dataset))
manager = get_manager(train_data=train_data, val_data=validation_data, controller=None, model_space=model_space, motif_name=('MYC_known10' if (arg.dataset == 1) else 'CTCF_known1'), wd=arg.wd)
grid_search(model_space, manager, arg.wd, B=arg.B)
|
def get_model_space_simple():
state_space = ModelSpace()
default_params = {'kernel_initializer': 'glorot_uniform', 'activation': 'relu'}
param_list = [[{'filters': 256, 'kernel_size': 8}, {'filters': 256, 'kernel_size': 14}, {'filters': 256, 'kernel_size': 20}]]
layer_embedding_sharing = {}
conv_seen = 0
for i in range(len(param_list)):
conv_states = [State('Identity')]
for j in range(len(param_list[i])):
d = copy.deepcopy(default_params)
for (k, v) in param_list[i][j].items():
d[k] = v
conv_states.append(State('conv1d', name='conv{}'.format(conv_seen), **d))
state_space.add_layer((conv_seen * 3), conv_states)
if (i > 0):
layer_embedding_sharing[(conv_seen * 3)] = 0
conv_seen += 1
if (i < (len(param_list) - 1)):
pool_states = [State('Identity'), State('maxpool1d', pool_size=4, strides=4), State('avgpool1d', pool_size=4, strides=4)]
if (i > 0):
layer_embedding_sharing[((conv_seen * 3) - 2)] = 1
else:
pool_states = [State('Flatten'), State('GlobalMaxPool1D'), State('GlobalAvgPool1D')]
state_space.add_layer(((conv_seen * 3) - 2), pool_states)
state_space.add_layer(((conv_seen * 3) - 1), [State('Identity'), State('Dropout', rate=0.1), State('Dropout', rate=0.3), State('Dropout', rate=0.5)])
if (i > 0):
layer_embedding_sharing[((conv_seen * 3) - 1)] = 2
state_space.add_layer((conv_seen * 3), [State('Dense', units=30, activation='relu'), State('Dense', units=100, activation='relu'), State('Identity')])
return (state_space, layer_embedding_sharing)
|
def get_model_space_long():
state_space = ModelSpace()
default_params = {'kernel_initializer': 'glorot_uniform', 'activation': 'relu'}
param_list = [[{'filters': 16, 'kernel_size': 8}, {'filters': 16, 'kernel_size': 14}, {'filters': 16, 'kernel_size': 20}], [{'filters': 64, 'kernel_size': 8}, {'filters': 64, 'kernel_size': 14}, {'filters': 64, 'kernel_size': 20}], [{'filters': 256, 'kernel_size': 8}, {'filters': 256, 'kernel_size': 14}, {'filters': 256, 'kernel_size': 20}]]
layer_embedding_sharing = {}
conv_seen = 0
for i in range(len(param_list)):
conv_states = [State('conv1d', filters=int(((4 ** (i - 1)) * 16)), kernel_size=1, activation='linear')]
for j in range(len(param_list[i])):
d = copy.deepcopy(default_params)
for (k, v) in param_list[i][j].items():
d[k] = v
conv_states.append(State('conv1d', name='conv{}'.format(conv_seen), **d))
state_space.add_layer((conv_seen * 3), conv_states)
if (i > 0):
layer_embedding_sharing[(conv_seen * 3)] = 0
conv_seen += 1
if (i < (len(param_list) - 1)):
pool_states = [State('Identity'), State('maxpool1d', pool_size=4, strides=4), State('avgpool1d', pool_size=4, strides=4)]
if (i > 0):
layer_embedding_sharing[((conv_seen * 3) - 2)] = 1
else:
pool_states = [State('Flatten'), State('GlobalMaxPool1D'), State('GlobalAvgPool1D')]
state_space.add_layer(((conv_seen * 3) - 2), pool_states)
state_space.add_layer(((conv_seen * 3) - 1), [State('Identity'), State('Dropout', rate=0.1), State('Dropout', rate=0.3), State('Dropout', rate=0.5)])
if (i > 0):
layer_embedding_sharing[((conv_seen * 3) - 1)] = 2
state_space.add_layer((conv_seen * 3), [State('Dense', units=30, activation='relu'), State('Dense', units=100, activation='relu'), State('Identity')])
return (state_space, layer_embedding_sharing)
|
def get_model_space_long_and_dilation():
state_space = ModelSpace()
default_params = {'kernel_initializer': 'glorot_uniform', 'activation': 'relu'}
param_list = [[{'filters': 16, 'kernel_size': 8}, {'filters': 16, 'kernel_size': 14}, {'filters': 16, 'kernel_size': 20}, {'filters': 16, 'kernel_size': 8, 'dilation_rate': 2}, {'filters': 16, 'kernel_size': 14, 'dilation_rate': 2}, {'filters': 16, 'kernel_size': 20, 'dilation_rate': 2}], [{'filters': 64, 'kernel_size': 8}, {'filters': 64, 'kernel_size': 14}, {'filters': 64, 'kernel_size': 20}, {'filters': 64, 'kernel_size': 8, 'dilation_rate': 2}, {'filters': 64, 'kernel_size': 14, 'dilation_rate': 2}, {'filters': 64, 'kernel_size': 20, 'dilation_rate': 2}], [{'filters': 256, 'kernel_size': 8}, {'filters': 256, 'kernel_size': 14}, {'filters': 256, 'kernel_size': 20}, {'filters': 256, 'kernel_size': 8, 'dilation_rate': 2}, {'filters': 256, 'kernel_size': 14, 'dilation_rate': 2}, {'filters': 256, 'kernel_size': 20, 'dilation_rate': 2}]]
layer_embedding_sharing = {}
conv_seen = 0
for i in range(len(param_list)):
conv_states = [State('conv1d', filters=int(((4 ** (i - 1)) * 16)), kernel_size=1, activation='linear')]
for j in range(len(param_list[i])):
d = copy.deepcopy(default_params)
for (k, v) in param_list[i][j].items():
d[k] = v
conv_states.append(State('conv1d', name='conv{}'.format(conv_seen), **d))
state_space.add_layer((conv_seen * 3), conv_states)
if (i > 0):
layer_embedding_sharing[(conv_seen * 3)] = 0
conv_seen += 1
if (i < (len(param_list) - 1)):
pool_states = [State('maxpool1d', pool_size=4, strides=4), State('avgpool1d', pool_size=4, strides=4)]
if (i > 0):
layer_embedding_sharing[((conv_seen * 3) - 2)] = 1
else:
pool_states = [State('Flatten'), State('GlobalMaxPool1D'), State('GlobalAvgPool1D'), State('LSTM', units=256)]
state_space.add_layer(((conv_seen * 3) - 2), pool_states)
state_space.add_layer(((conv_seen * 3) - 1), [State('Dropout', rate=0.1), State('Dropout', rate=0.3), State('Dropout', rate=0.5)])
if (i > 0):
layer_embedding_sharing[((conv_seen * 3) - 1)] = 2
state_space.add_layer((conv_seen * 3), [State('Dense', units=30, activation='relu'), State('Dense', units=100, activation='relu'), State('Identity')])
return (state_space, layer_embedding_sharing)
|
def read_metadata():
meta = pd.read_table('./data/zero_shot/full_metadata.tsv')
meta = meta.loc[(meta['molecule'] == 'DNA')]
indexer = pd.read_table('./data/zero_shot_deepsea/label_index_with_category_annot.tsv')
indexer['labels'] = ['_'.join(x.split('--')).replace('\xa0', '') for x in indexer['labels']]
from collections import Counter
counter = Counter()
new_label = []
for label in indexer['labels']:
if (counter[label] > 0):
new_label.append(('%s_%i' % (label, counter[label])))
else:
new_label.append(label)
counter[label] += 1
indexer.index = new_label
meta['new_name'] = [x.replace('+', '_') for x in meta['new_name']]
meta['col_idx'] = [(indexer.loc[(x, 'index')] if (x in indexer.index) else np.nan) for x in meta['new_name']]
meta = meta.dropna()
meta['col_idx'] = meta['col_idx'].astype('int')
meta.index = meta['feat_name']
return meta
|
def get_zs_controller_configs():
_holder = OrderedDict({'lstm_size': [32, 128], 'temperature': [0.5, 1, 2], 'use_ppo_loss': [True, False]})
_rollout = [x for x in itertools.product(*_holder.values())]
_keys = [k for k in _holder]
configs_all = [{_keys[i]: x[i] for i in range(len(x))} for x in _rollout]
return configs_all
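
# Hedged usage sketch (not in the original source): the Cartesian product above yields
# 2 * 3 * 2 = 12 controller configurations for the zero-shot controller grid.
configs = get_zs_controller_configs()
print(len(configs))  # 12
print(configs[0])    # {'lstm_size': 32, 'temperature': 0.5, 'use_ppo_loss': True}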
|
def analyze_sim_data(wd):
df = pd.read_table(os.path.join(wd, 'sum_df.tsv'))
d = json.loads(df.iloc[0]['config_str'].replace("'", '"').replace('True', 'true').replace('False', 'false'))
config_keys = [k for k in d]
configs = [[] for _ in range(len(config_keys))]
efficiency = []
specificity = []
config_index = []
for i in range(0, df.shape[0], 2):
d = json.loads(df.iloc[i]['config_str'].replace("'", '"').replace('True', 'true').replace('False', 'false'))
for k in range(len(config_keys)):
configs[k].append(d[config_keys[k]])
efficiency.append(df.iloc[[i, (i + 1)]]['target_median'].sum())
m1_sp = (df.iloc[i]['target_median'] - df.iloc[(i + 1)]['other_median'])
m2_sp = (df.iloc[(i + 1)]['target_median'] - df.iloc[i]['other_median'])
specificity.append((m1_sp + m2_sp))
config_index.append(df.iloc[i]['c'])
data_dict = {'config_index': config_index, 'efficiency': efficiency, 'specificity': specificity}
data_dict.update({config_keys[i]: configs[i] for i in range(len(config_keys))})
eval_df = pd.DataFrame(data_dict, columns=((['config_index'] + config_keys) + ['efficiency', 'specificity']))
eval_df.groupby('config_index').mean().sort_values(by='efficiency', ascending=False).to_csv(os.path.join(wd, 'eval_df.tsv'), sep='\t', index=False, float_format='%.4f')
eval_df.sort_values(by='efficiency', ascending=False).to_csv(os.path.join(wd, 'eval_df.ungrouped.tsv'), sep='\t', index=False, float_format='%.4f')
|
class AutoDeeplab(nn.Module):
def __init__(self, num_classes, num_layers, criterion=None, filter_multiplier=8, block_multiplier=5, step=5, cell=cell_level_search.Cell, input_channels=3):
super(AutoDeeplab, self).__init__()
self.cells = nn.ModuleList()
self._num_layers = num_layers
self._num_classes = num_classes
self._step = step
self._block_multiplier = block_multiplier
self._filter_multiplier = filter_multiplier
self._criterion = criterion
self._initialize_alphas_betas()
f_initial = int(self._filter_multiplier)
half_f_initial = int((f_initial / 2))
self.stem0 = nn.Sequential(nn.Conv2d(input_channels, (half_f_initial * self._block_multiplier), 3, stride=2, padding=1), nn.BatchNorm2d((half_f_initial * self._block_multiplier)), nn.ReLU())
self.stem1 = nn.Sequential(nn.Conv2d((half_f_initial * self._block_multiplier), (half_f_initial * self._block_multiplier), 3, stride=1, padding=1), nn.BatchNorm2d((half_f_initial * self._block_multiplier)), nn.ReLU())
self.stem2 = nn.Sequential(nn.Conv2d((half_f_initial * self._block_multiplier), (f_initial * self._block_multiplier), 3, stride=2, padding=1), nn.BatchNorm2d((f_initial * self._block_multiplier)), nn.ReLU())
for i in range(self._num_layers):
if (i == 0):
cell1 = cell(self._step, self._block_multiplier, (- 1), None, f_initial, None, self._filter_multiplier)
cell2 = cell(self._step, self._block_multiplier, (- 1), f_initial, None, None, (self._filter_multiplier * 2))
self.cells += [cell1]
self.cells += [cell2]
elif (i == 1):
cell1 = cell(self._step, self._block_multiplier, f_initial, None, self._filter_multiplier, (self._filter_multiplier * 2), self._filter_multiplier)
cell2 = cell(self._step, self._block_multiplier, (- 1), self._filter_multiplier, (self._filter_multiplier * 2), None, (self._filter_multiplier * 2))
cell3 = cell(self._step, self._block_multiplier, (- 1), (self._filter_multiplier * 2), None, None, (self._filter_multiplier * 4))
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
elif (i == 2):
cell1 = cell(self._step, self._block_multiplier, self._filter_multiplier, None, self._filter_multiplier, (self._filter_multiplier * 2), self._filter_multiplier)
cell2 = cell(self._step, self._block_multiplier, (self._filter_multiplier * 2), self._filter_multiplier, (self._filter_multiplier * 2), (self._filter_multiplier * 4), (self._filter_multiplier * 2))
cell3 = cell(self._step, self._block_multiplier, (- 1), (self._filter_multiplier * 2), (self._filter_multiplier * 4), None, (self._filter_multiplier * 4))
cell4 = cell(self._step, self._block_multiplier, (- 1), (self._filter_multiplier * 4), None, None, (self._filter_multiplier * 8))
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
self.cells += [cell4]
elif (i == 3):
cell1 = cell(self._step, self._block_multiplier, self._filter_multiplier, None, self._filter_multiplier, (self._filter_multiplier * 2), self._filter_multiplier)
cell2 = cell(self._step, self._block_multiplier, (self._filter_multiplier * 2), self._filter_multiplier, (self._filter_multiplier * 2), (self._filter_multiplier * 4), (self._filter_multiplier * 2))
cell3 = cell(self._step, self._block_multiplier, (self._filter_multiplier * 4), (self._filter_multiplier * 2), (self._filter_multiplier * 4), (self._filter_multiplier * 8), (self._filter_multiplier * 4))
cell4 = cell(self._step, self._block_multiplier, (- 1), (self._filter_multiplier * 4), (self._filter_multiplier * 8), None, (self._filter_multiplier * 8))
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
self.cells += [cell4]
else:
cell1 = cell(self._step, self._block_multiplier, self._filter_multiplier, None, self._filter_multiplier, (self._filter_multiplier * 2), self._filter_multiplier)
cell2 = cell(self._step, self._block_multiplier, (self._filter_multiplier * 2), self._filter_multiplier, (self._filter_multiplier * 2), (self._filter_multiplier * 4), (self._filter_multiplier * 2))
cell3 = cell(self._step, self._block_multiplier, (self._filter_multiplier * 4), (self._filter_multiplier * 2), (self._filter_multiplier * 4), (self._filter_multiplier * 8), (self._filter_multiplier * 4))
cell4 = cell(self._step, self._block_multiplier, (self._filter_multiplier * 8), (self._filter_multiplier * 4), (self._filter_multiplier * 8), None, (self._filter_multiplier * 8))
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
self.cells += [cell4]
self.aspp_4 = nn.Sequential(ASPP((self._filter_multiplier * self._block_multiplier), self._num_classes, 24, 24))
self.aspp_8 = nn.Sequential(ASPP(((self._filter_multiplier * 2) * self._block_multiplier), self._num_classes, 12, 12))
self.aspp_16 = nn.Sequential(ASPP(((self._filter_multiplier * 4) * self._block_multiplier), self._num_classes, 6, 6))
self.aspp_32 = nn.Sequential(ASPP(((self._filter_multiplier * 8) * self._block_multiplier), self._num_classes, 3, 3))
def forward(self, x):
self.level_4 = []
self.level_8 = []
self.level_16 = []
self.level_32 = []
temp = self.stem0(x)
temp = self.stem1(temp)
self.level_4.append(self.stem2(temp))
count = 0
normalized_betas = torch.randn(self._num_layers, 4, 3).cuda()
if (torch.cuda.device_count() > 1):
print('1')
img_device = torch.device('cuda', x.get_device())
normalized_alphas = F.softmax(self.alphas.to(device=img_device), dim=(- 1))
for layer in range(len(self.betas)):
if (layer == 0):
normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:].to(device=img_device), dim=(- 1)) * (2 / 3))
elif (layer == 1):
normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:].to(device=img_device), dim=(- 1)) * (2 / 3))
normalized_betas[layer][1] = F.softmax(self.betas[layer][1].to(device=img_device), dim=(- 1))
elif (layer == 2):
normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:].to(device=img_device), dim=(- 1)) * (2 / 3))
normalized_betas[layer][1] = F.softmax(self.betas[layer][1].to(device=img_device), dim=(- 1))
normalized_betas[layer][2] = F.softmax(self.betas[layer][2].to(device=img_device), dim=(- 1))
else:
normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:].to(device=img_device), dim=(- 1)) * (2 / 3))
normalized_betas[layer][1] = F.softmax(self.betas[layer][1].to(device=img_device), dim=(- 1))
normalized_betas[layer][2] = F.softmax(self.betas[layer][2].to(device=img_device), dim=(- 1))
                    normalized_betas[layer][3][:2] = (F.softmax(self.betas[layer][3][:2].to(device=img_device), dim=(- 1)) * (2 / 3))
else:
normalized_alphas = F.softmax(self.alphas, dim=(- 1))
for layer in range(len(self.betas)):
if (layer == 0):
normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:], dim=(- 1)) * (2 / 3))
elif (layer == 1):
normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:], dim=(- 1)) * (2 / 3))
normalized_betas[layer][1] = F.softmax(self.betas[layer][1], dim=(- 1))
elif (layer == 2):
normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:], dim=(- 1)) * (2 / 3))
normalized_betas[layer][1] = F.softmax(self.betas[layer][1], dim=(- 1))
normalized_betas[layer][2] = F.softmax(self.betas[layer][2], dim=(- 1))
else:
normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:], dim=(- 1)) * (2 / 3))
normalized_betas[layer][1] = F.softmax(self.betas[layer][1], dim=(- 1))
normalized_betas[layer][2] = F.softmax(self.betas[layer][2], dim=(- 1))
normalized_betas[layer][3][:2] = (F.softmax(self.betas[layer][3][:2], dim=(- 1)) * (2 / 3))
for layer in range(self._num_layers):
if (layer == 0):
(level4_new,) = self.cells[count](None, None, self.level_4[(- 1)], None, normalized_alphas)
count += 1
(level8_new,) = self.cells[count](None, self.level_4[(- 1)], None, None, normalized_alphas)
count += 1
level4_new = (normalized_betas[layer][0][1] * level4_new)
level8_new = (normalized_betas[layer][0][2] * level8_new)
self.level_4.append(level4_new)
self.level_8.append(level8_new)
elif (layer == 1):
(level4_new_1, level4_new_2) = self.cells[count](self.level_4[(- 2)], None, self.level_4[(- 1)], self.level_8[(- 1)], normalized_alphas)
count += 1
level4_new = ((normalized_betas[layer][0][1] * level4_new_1) + (normalized_betas[layer][1][0] * level4_new_2))
(level8_new_1, level8_new_2) = self.cells[count](None, self.level_4[(- 1)], self.level_8[(- 1)], None, normalized_alphas)
count += 1
level8_new = ((normalized_betas[layer][0][2] * level8_new_1) + (normalized_betas[layer][1][2] * level8_new_2))
(level16_new,) = self.cells[count](None, self.level_8[(- 1)], None, None, normalized_alphas)
level16_new = (normalized_betas[layer][1][2] * level16_new)
count += 1
self.level_4.append(level4_new)
self.level_8.append(level8_new)
self.level_16.append(level16_new)
elif (layer == 2):
(level4_new_1, level4_new_2) = self.cells[count](self.level_4[(- 2)], None, self.level_4[(- 1)], self.level_8[(- 1)], normalized_alphas)
count += 1
level4_new = ((normalized_betas[layer][0][1] * level4_new_1) + (normalized_betas[layer][1][0] * level4_new_2))
(level8_new_1, level8_new_2, level8_new_3) = self.cells[count](self.level_8[(- 2)], self.level_4[(- 1)], self.level_8[(- 1)], self.level_16[(- 1)], normalized_alphas)
count += 1
level8_new = (((normalized_betas[layer][0][2] * level8_new_1) + (normalized_betas[layer][1][1] * level8_new_2)) + (normalized_betas[layer][2][0] * level8_new_3))
(level16_new_1, level16_new_2) = self.cells[count](None, self.level_8[(- 1)], self.level_16[(- 1)], None, normalized_alphas)
count += 1
level16_new = ((normalized_betas[layer][1][2] * level16_new_1) + (normalized_betas[layer][2][1] * level16_new_2))
(level32_new,) = self.cells[count](None, self.level_16[(- 1)], None, None, normalized_alphas)
level32_new = (normalized_betas[layer][2][2] * level32_new)
count += 1
self.level_4.append(level4_new)
self.level_8.append(level8_new)
self.level_16.append(level16_new)
self.level_32.append(level32_new)
elif (layer == 3):
(level4_new_1, level4_new_2) = self.cells[count](self.level_4[(- 2)], None, self.level_4[(- 1)], self.level_8[(- 1)], normalized_alphas)
count += 1
level4_new = ((normalized_betas[layer][0][1] * level4_new_1) + (normalized_betas[layer][1][0] * level4_new_2))
(level8_new_1, level8_new_2, level8_new_3) = self.cells[count](self.level_8[(- 2)], self.level_4[(- 1)], self.level_8[(- 1)], self.level_16[(- 1)], normalized_alphas)
count += 1
level8_new = (((normalized_betas[layer][0][2] * level8_new_1) + (normalized_betas[layer][1][1] * level8_new_2)) + (normalized_betas[layer][2][0] * level8_new_3))
(level16_new_1, level16_new_2, level16_new_3) = self.cells[count](self.level_16[(- 2)], self.level_8[(- 1)], self.level_16[(- 1)], self.level_32[(- 1)], normalized_alphas)
count += 1
level16_new = (((normalized_betas[layer][1][2] * level16_new_1) + (normalized_betas[layer][2][1] * level16_new_2)) + (normalized_betas[layer][3][0] * level16_new_3))
(level32_new_1, level32_new_2) = self.cells[count](None, self.level_16[(- 1)], self.level_32[(- 1)], None, normalized_alphas)
count += 1
level32_new = ((normalized_betas[layer][2][2] * level32_new_1) + (normalized_betas[layer][3][1] * level32_new_2))
self.level_4.append(level4_new)
self.level_8.append(level8_new)
self.level_16.append(level16_new)
self.level_32.append(level32_new)
else:
(level4_new_1, level4_new_2) = self.cells[count](self.level_4[(- 2)], None, self.level_4[(- 1)], self.level_8[(- 1)], normalized_alphas)
count += 1
level4_new = ((normalized_betas[layer][0][1] * level4_new_1) + (normalized_betas[layer][1][0] * level4_new_2))
(level8_new_1, level8_new_2, level8_new_3) = self.cells[count](self.level_8[(- 2)], self.level_4[(- 1)], self.level_8[(- 1)], self.level_16[(- 1)], normalized_alphas)
count += 1
level8_new = (((normalized_betas[layer][0][2] * level8_new_1) + (normalized_betas[layer][1][1] * level8_new_2)) + (normalized_betas[layer][2][0] * level8_new_3))
(level16_new_1, level16_new_2, level16_new_3) = self.cells[count](self.level_16[(- 2)], self.level_8[(- 1)], self.level_16[(- 1)], self.level_32[(- 1)], normalized_alphas)
count += 1
level16_new = (((normalized_betas[layer][1][2] * level16_new_1) + (normalized_betas[layer][2][1] * level16_new_2)) + (normalized_betas[layer][3][0] * level16_new_3))
(level32_new_1, level32_new_2) = self.cells[count](self.level_32[(- 2)], self.level_16[(- 1)], self.level_32[(- 1)], None, normalized_alphas)
count += 1
level32_new = ((normalized_betas[layer][2][2] * level32_new_1) + (normalized_betas[layer][3][1] * level32_new_2))
self.level_4.append(level4_new)
self.level_8.append(level8_new)
self.level_16.append(level16_new)
self.level_32.append(level32_new)
self.level_4 = self.level_4[(- 2):]
self.level_8 = self.level_8[(- 2):]
self.level_16 = self.level_16[(- 2):]
self.level_32 = self.level_32[(- 2):]
aspp_result_4 = self.aspp_4(self.level_4[(- 1)])
aspp_result_8 = self.aspp_8(self.level_8[(- 1)])
aspp_result_16 = self.aspp_16(self.level_16[(- 1)])
aspp_result_32 = self.aspp_32(self.level_32[(- 1)])
upsample = nn.Upsample(size=x.size()[2:], mode='bilinear', align_corners=True)
aspp_result_4 = upsample(aspp_result_4)
aspp_result_8 = upsample(aspp_result_8)
aspp_result_16 = upsample(aspp_result_16)
aspp_result_32 = upsample(aspp_result_32)
sum_feature_map = (((aspp_result_4 + aspp_result_8) + aspp_result_16) + aspp_result_32)
return sum_feature_map
def _initialize_alphas_betas(self):
k = sum((1 for i in range(self._step) for n in range((2 + i))))
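        # k counts the mixed-op edges inside one cell: intermediate node i receives
        # (2 + i) inputs, so with step = 5 this is 2 + 3 + 4 + 5 + 6 = 20 edges,
        # each owning one row of alphas.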
num_ops = len(PRIMITIVES)
alphas = (0.001 * torch.randn(k, num_ops)).clone().detach().requires_grad_(True)
betas = (0.001 * torch.randn(self._num_layers, 4, 3)).clone().detach().requires_grad_(True)
self._arch_parameters = [alphas, betas]
self._arch_param_names = ['alphas', 'betas']
        for (name, param) in zip(self._arch_param_names, self._arch_parameters):
            self.register_parameter(name, torch.nn.Parameter(param))
def arch_parameters(self):
return [param for (name, param) in self.named_parameters() if (name in self._arch_param_names)]
def weight_parameters(self):
return [param for (name, param) in self.named_parameters() if (name not in self._arch_param_names)]
def genotype(self):
decoder = Decoder(self.alphas_cell, self._block_multiplier, self._step)
return decoder.genotype_decode()
def _loss(self, input, target):
logits = self(input)
return self._criterion(logits, target)
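# A minimal usage sketch (an assumption, not part of the original training code),
# showing why arch_parameters() and weight_parameters() are kept separate: a
# DARTS-style search alternates two optimizers, one stepping only the architecture
# variables (alphas/betas) and one stepping only the ordinary network weights.
# The optimizers and the two batches are hypothetical inputs.
def example_bilevel_search_step(model, train_batch, arch_batch, w_optimizer, a_optimizer):
    # e.g. w_optimizer = torch.optim.SGD(model.weight_parameters(), lr=0.025, momentum=0.9, weight_decay=3e-4)
    #      a_optimizer = torch.optim.Adam(model.arch_parameters(), lr=3e-3, weight_decay=1e-3)
    (x_w, y_w) = train_batch
    (x_a, y_a) = arch_batch
    a_optimizer.zero_grad()
    model._loss(x_a, y_a).backward()  # a_optimizer only steps on alphas/betas
    a_optimizer.step()
    w_optimizer.zero_grad()
    model._loss(x_w, y_w).backward()  # w_optimizer only steps on conv/BN weights
    w_optimizer.step()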
|
def main():
model = AutoDeeplab(7, 12, None)
    x = torch.ones(4, 3, 224, 224)
resultdfs = model.decode_dfs()
resultviterbi = model.decode_viterbi()[0]
print(resultviterbi)
print(model.genotype())
|
class MixedOp(nn.Module):
def __init__(self, C, stride):
super(MixedOp, self).__init__()
self._ops = nn.ModuleList()
for primitive in PRIMITIVES:
op = OPS[primitive](C, stride, False, False)
if ('pool' in primitive):
op = nn.Sequential(op, nn.BatchNorm2d(C, affine=False))
self._ops.append(op)
def forward(self, x, weights):
return sum(((w * op(x)) for (w, op) in zip(weights, self._ops)))
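# Illustrative sketch (not original code): MixedOp applies every candidate op in
# PRIMITIVES to the same input and returns their weighted sum, so softmaxing one
# row of alphas gives the continuous relaxation used during the search phase.
def example_mixed_op(x, alphas_row, channels=32):
    # x: (N, channels, H, W); alphas_row: 1-D tensor with one entry per primitive
    weights = F.softmax(alphas_row, dim=-1)
    op = MixedOp(channels, stride=1)
    return op(x, weights)  # same spatial shape as applying a single candidate op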
|
class Cell(nn.Module):
def __init__(self, steps, block_multiplier, prev_prev_fmultiplier, prev_fmultiplier_down, prev_fmultiplier_same, prev_fmultiplier_up, filter_multiplier):
super(Cell, self).__init__()
self.C_in = (block_multiplier * filter_multiplier)
self.C_out = filter_multiplier
self.C_prev_prev = int((prev_prev_fmultiplier * block_multiplier))
self._prev_fmultiplier_same = prev_fmultiplier_same
if (prev_fmultiplier_down is not None):
self.C_prev_down = int((prev_fmultiplier_down * block_multiplier))
self.preprocess_down = ReLUConvBN(self.C_prev_down, self.C_out, 1, 1, 0, affine=False)
if (prev_fmultiplier_same is not None):
self.C_prev_same = int((prev_fmultiplier_same * block_multiplier))
self.preprocess_same = ReLUConvBN(self.C_prev_same, self.C_out, 1, 1, 0, affine=False)
if (prev_fmultiplier_up is not None):
self.C_prev_up = int((prev_fmultiplier_up * block_multiplier))
self.preprocess_up = ReLUConvBN(self.C_prev_up, self.C_out, 1, 1, 0, affine=False)
if (prev_prev_fmultiplier != (- 1)):
self.pre_preprocess = ReLUConvBN(self.C_prev_prev, self.C_out, 1, 1, 0, affine=False)
self._steps = steps
self.block_multiplier = block_multiplier
self._ops = nn.ModuleList()
for i in range(self._steps):
for j in range((2 + i)):
stride = 1
if ((prev_prev_fmultiplier == (- 1)) and (j == 0)):
op = None
else:
op = MixedOp(self.C_out, stride)
self._ops.append(op)
self._initialize_weights()
def scale_dimension(self, dim, scale):
assert isinstance(dim, int)
return (int((((float(dim) - 1.0) * scale) + 1.0)) if (dim % 2) else int((dim * scale)))
def prev_feature_resize(self, prev_feature, mode):
if (mode == 'down'):
feature_size_h = self.scale_dimension(prev_feature.shape[2], 0.5)
feature_size_w = self.scale_dimension(prev_feature.shape[3], 0.5)
elif (mode == 'up'):
feature_size_h = self.scale_dimension(prev_feature.shape[2], 2)
feature_size_w = self.scale_dimension(prev_feature.shape[3], 2)
return F.interpolate(prev_feature, (feature_size_h, feature_size_w), mode='bilinear', align_corners=True)
def forward(self, s0, s1_down, s1_same, s1_up, n_alphas):
if (s1_down is not None):
s1_down = self.prev_feature_resize(s1_down, 'down')
s1_down = self.preprocess_down(s1_down)
(size_h, size_w) = (s1_down.shape[2], s1_down.shape[3])
if (s1_same is not None):
s1_same = self.preprocess_same(s1_same)
(size_h, size_w) = (s1_same.shape[2], s1_same.shape[3])
if (s1_up is not None):
s1_up = self.prev_feature_resize(s1_up, 'up')
s1_up = self.preprocess_up(s1_up)
(size_h, size_w) = (s1_up.shape[2], s1_up.shape[3])
all_states = []
if (s0 is not None):
s0 = (F.interpolate(s0, (size_h, size_w), mode='bilinear', align_corners=True) if ((s0.shape[2] != size_h) or (s0.shape[3] != size_w)) else s0)
s0 = (self.pre_preprocess(s0) if (s0.shape[1] != self.C_out) else s0)
if (s1_down is not None):
states_down = [s0, s1_down]
all_states.append(states_down)
if (s1_same is not None):
states_same = [s0, s1_same]
all_states.append(states_same)
if (s1_up is not None):
states_up = [s0, s1_up]
all_states.append(states_up)
else:
if (s1_down is not None):
states_down = [0, s1_down]
all_states.append(states_down)
if (s1_same is not None):
states_same = [0, s1_same]
all_states.append(states_same)
if (s1_up is not None):
states_up = [0, s1_up]
all_states.append(states_up)
final_concates = []
for states in all_states:
offset = 0
for i in range(self._steps):
new_states = []
for (j, h) in enumerate(states):
branch_index = (offset + j)
if (self._ops[branch_index] is None):
continue
new_state = self._ops[branch_index](h, n_alphas[branch_index])
new_states.append(new_state)
s = sum(new_states)
offset += len(states)
states.append(s)
concat_feature = torch.cat(states[(- self.block_multiplier):], dim=1)
final_concates.append(concat_feature)
return final_concates
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
if (m.weight is not None):
m.weight.data.fill_(1)
m.bias.data.zero_()
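# Worked example (illustrative only): Cell.scale_dimension keeps odd spatial sizes
# odd, so 2x down/upsampling round-trips cleanly on crop sizes such as 321:
#   scale_dimension(321, 0.5) -> int((321 - 1) * 0.5 + 1) = 161   (odd input)
#   scale_dimension(320, 0.5) -> int(320 * 0.5)           = 160   (even input)
#   scale_dimension(161, 2)   -> int((161 - 1) * 2 + 1)   = 321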
|
def obtain_decode_args():
parser = argparse.ArgumentParser(description='PyTorch DeeplabV3Plus Training')
parser.add_argument('--backbone', type=str, default='resnet', choices=['resnet', 'xception', 'drn', 'mobilenet'], help='backbone name (default: resnet)')
parser.add_argument('--dataset', type=str, default='darcyflow', choices=['darcyflow', 'protein', 'cosmic'], help='dataset name (default: darcyflow)')
parser.add_argument('--autodeeplab', type=str, default='train', choices=['search', 'train'])
parser.add_argument('--load-parallel', type=int, default=0)
parser.add_argument('--clean-module', type=int, default=0)
parser.add_argument('--crop_size', type=int, default=320, help='crop image size')
parser.add_argument('--resize', type=int, default=512, help='resize image size')
parser.add_argument('--filter_multiplier', type=int, default=8)
parser.add_argument('--block_multiplier', type=int, default=5)
parser.add_argument('--step', type=int, default=5)
parser.add_argument('--batch-size', type=int, default=2, metavar='N', help='input batch size for training (default: auto)')
parser.add_argument('--test-batch-size', type=int, default=None, metavar='N', help='input batch size for testing (default: auto)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--resume', type=str, default=None, help='put the path to resuming file if needed')
return parser.parse_args()
|
def obtain_evaluate_args():
parser = argparse.ArgumentParser(description='---------------------evaluate args---------------------')
parser.add_argument('--train', action='store_true', default=False, help='training mode')
parser.add_argument('--exp', type=str, default='bnlr7e-3', help='name of experiment')
parser.add_argument('--gpu', type=int, default=0, help='test time gpu device id')
parser.add_argument('--backbone', type=str, default='resnet101', help='resnet101')
parser.add_argument('--dataset', type=str, default='darcyflow', help='darcyflow, protein, or cosmic')
parser.add_argument('--groups', type=int, default=None, help='num of groups for group normalization')
parser.add_argument('--epochs', type=int, default=30, help='num of training epochs')
parser.add_argument('--batch_size', type=int, default=8, help='batch size')
parser.add_argument('--base_lr', type=float, default=0.00025, help='base learning rate')
parser.add_argument('--last_mult', type=float, default=1.0, help='learning rate multiplier for last layers')
parser.add_argument('--scratch', action='store_true', default=False, help='train from scratch')
parser.add_argument('--freeze_bn', action='store_true', default=False, help='freeze batch normalization parameters')
parser.add_argument('--weight_std', action='store_true', default=False, help='weight standardization')
parser.add_argument('--beta', action='store_true', default=False, help='resnet101 beta')
parser.add_argument('--crop_size', type=int, default=513, help='image crop size')
parser.add_argument('--resume', type=str, default=None, help='path to checkpoint to resume from')
parser.add_argument('--workers', type=int, default=4, help='number of data loading workers')
return parser
|
def obtain_retrain_autodeeplab_args():
parser = argparse.ArgumentParser(description='PyTorch Autodeeplabv3+ Training')
parser.add_argument('--train', action='store_true', default=True, help='training mode')
parser.add_argument('--exp', type=str, default='bnlr7e-3', help='name of experiment')
parser.add_argument('--gpu', type=str, default='0', help='test time gpu device id')
parser.add_argument('--backbone', type=str, default='autodeeplab', help='resnet101')
parser.add_argument('--dataset', type=str, default='cityscapes', help='pascal or cityscapes')
parser.add_argument('--groups', type=int, default=None, help='num of groups for group normalization')
parser.add_argument('--epochs', type=int, default=4000, help='num of training epochs')
parser.add_argument('--batch_size', type=int, default=14, help='batch size')
parser.add_argument('--base_lr', type=float, default=0.05, help='base learning rate')
parser.add_argument('--warmup_start_lr', type=float, default=5e-06, help='warm up learning rate')
parser.add_argument('--lr-step', type=float, default=None)
parser.add_argument('--warmup-iters', type=int, default=1000)
parser.add_argument('--min-lr', type=float, default=None)
parser.add_argument('--last_mult', type=float, default=1.0, help='learning rate multiplier for last layers')
parser.add_argument('--scratch', action='store_true', default=False, help='train from scratch')
parser.add_argument('--freeze_bn', action='store_true', default=False, help='freeze batch normalization parameters')
parser.add_argument('--weight_std', action='store_true', default=False, help='weight standardization')
parser.add_argument('--beta', action='store_true', default=False, help='resnet101 beta')
parser.add_argument('--crop_size', type=int, default=769, help='image crop size')
    parser.add_argument('--resize', type=int, default=769, help='image resize size')
parser.add_argument('--workers', type=int, default=4, help='number of data loading workers')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--resume', type=str, default=None, help='put the path to resuming file if needed')
parser.add_argument('--filter_multiplier', type=int, default=32)
parser.add_argument('--dist', type=bool, default=False)
parser.add_argument('--autodeeplab', type=str, default='train')
parser.add_argument('--block_multiplier', type=int, default=5)
parser.add_argument('--use-ABN', action='store_true')
parser.add_argument('--affine', default=False, type=bool, help='whether use affine in BN')
parser.add_argument('--port', default=6000, type=int)
    parser.add_argument('--max-iteration', default=1000000, type=int)
parser.add_argument('--net_arch', default=None, type=str)
parser.add_argument('--cell_arch', default=None, type=str)
parser.add_argument('--criterion', default='Ohem', type=str)
parser.add_argument('--initial-fm', default=None, type=int)
parser.add_argument('--mode', default='poly', type=str, help='how lr decline')
parser.add_argument('--local_rank', dest='local_rank', type=int, default=(- 1))
parser.add_argument('--train_mode', type=str, default='iter', choices=['iter', 'epoch'])
parser.add_argument('--sub', type=int, default=5)
args = parser.parse_args()
return args
|
def obtain_retrain_deeplab_v3plus_args():
parser = argparse.ArgumentParser(description='PyTorch DeeplabV3Plus Training')
parser.add_argument('--backbone', type=str, default='resnet', choices=['resnet', 'xception', 'drn', 'mobilenet'], help='backbone name (default: resnet)')
    parser.add_argument('--out-stride', type=int, default=16, help='network output stride (default: 16)')
parser.add_argument('--dataset', type=str, default='pascal', choices=['pascal', 'coco', 'cityscapes'], help='dataset name (default: pascal)')
parser.add_argument('--use-sbd', action='store_true', default=True, help='whether to use SBD dataset (default: True)')
parser.add_argument('--workers', type=int, default=4, metavar='N', help='dataloader threads')
parser.add_argument('--base-size', type=int, default=513, help='base image size')
parser.add_argument('--crop-size', type=int, default=513, help='crop image size')
parser.add_argument('--sync-bn', type=bool, default=None, help='whether to use sync bn (default: auto)')
parser.add_argument('--freeze-bn', type=bool, default=False, help='whether to freeze bn parameters (default: False)')
parser.add_argument('--loss-type', type=str, default='ce', choices=['ce', 'focal'], help='loss func type (default: ce)')
parser.add_argument('--epochs', type=int, default=None, metavar='N', help='number of epochs to train (default: auto)')
parser.add_argument('--start_epoch', type=int, default=0, metavar='N', help='start epochs (default:0)')
parser.add_argument('--batch-size', type=int, default=None, metavar='N', help='input batch size for training (default: auto)')
parser.add_argument('--test-batch-size', type=int, default=None, metavar='N', help='input batch size for testing (default: auto)')
parser.add_argument('--use-balanced-weights', action='store_true', default=False, help='whether to use balanced weights (default: False)')
parser.add_argument('--lr', type=float, default=None, metavar='LR', help='learning rate (default: auto)')
parser.add_argument('--lr-scheduler', type=str, default='poly', choices=['poly', 'step', 'cos'], help='lr scheduler mode: (default: poly)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0005, metavar='M', help='w-decay (default: 5e-4)')
parser.add_argument('--nesterov', action='store_true', default=False, help='whether use nesterov (default: False)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0', help='use which gpu to train, must be a comma-separated list of integers only (default=0)')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--resume', type=str, default=None, help='put the path to resuming file if needed')
parser.add_argument('--checkname', type=str, default=None, help='set the checkpoint name')
parser.add_argument('--ft', action='store_true', default=False, help='finetuning on a different dataset')
    parser.add_argument('--eval-interval', type=int, default=1, help='evaluation interval (default: 1)')
parser.add_argument('--no-val', action='store_true', default=False, help='skip validation during training')
parser.add_argument('--use-ABN', default=True, type=bool, help='whether use ABN')
parser.add_argument('--affine', default=False, type=bool, help='whether use affine in BN')
args = parser.parse_args()
return args
|
class Config(object):
def __init__(self):
self.ignore_label = 255
self.aspp_global_feature = False
self.n_classes = 19
self.datapth = '/dataset/Cityscapes_dataset'
self.gpus = 8
self.crop_size = (769, 769)
self.mean = (0.485, 0.456, 0.406)
self.std = (0.229, 0.224, 0.225)
self.warmup_steps = 1000
self.warmup_start_lr = 5e-06
self.lr_start = 0.03
self.momentum = 0.9
self.weight_decay = 0.0005
self.lr_power = 0.9
self.max_iter = 41000
self.max_epoch = 8000
self.scales = (0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0)
self.flip = True
self.brightness = 0.5
self.contrast = 0.5
self.saturation = 0.5
self.ims_per_gpu = 2
self.n_workers = (2 * self.gpus)
self.msg_iter = 50
self.ohem_thresh = 0.7
self.respth = './log'
self.port = 32168
self.eval_batchsize = 2
self.eval_n_workers = 2
self.eval_scale = (1.0,)
self.eval_scales = (0.25, 0.5, 0.75, 1, 1.25, 1.5)
self.eval_flip = True
|
def obtain_search_args():
parser = argparse.ArgumentParser(description='PyTorch DeeplabV3Plus Training')
parser.add_argument('--backbone', type=str, default='resnet', choices=['resnet', 'xception', 'drn', 'mobilenet'], help='backbone name (default: resnet)')
    parser.add_argument('--opt_level', type=str, default='O0', choices=['O0', 'O1', 'O2', 'O3'], help='opt level for half precision training (default: O0)')
    parser.add_argument('--out-stride', type=int, default=16, help='network output stride (default: 16)')
parser.add_argument('--dataset', type=str, default='darcyflow', choices=['darcyflow', 'protein', 'cosmic'], help='dataset name (default: darcyflow)')
parser.add_argument('--autodeeplab', type=str, default='search', choices=['search', 'train'])
parser.add_argument('--use-sbd', action='store_true', default=False, help='whether to use SBD dataset (default: True)')
parser.add_argument('--load-parallel', type=int, default=0)
parser.add_argument('--clean-module', type=int, default=0)
parser.add_argument('--workers', type=int, default=0, metavar='N', help='dataloader threads')
parser.add_argument('--base_size', type=int, default=320, help='base image size')
parser.add_argument('--crop_size', type=int, default=321, help='crop image size')
parser.add_argument('--resize', type=int, default=512, help='resize image size')
parser.add_argument('--sync-bn', type=bool, default=None, help='whether to use sync bn (default: auto)')
parser.add_argument('--freeze-bn', type=bool, default=False, help='whether to freeze bn parameters (default: False)')
parser.add_argument('--loss-type', type=str, default='ce', choices=['ce', 'focal'], help='loss func type (default: ce)')
parser.add_argument('--epochs', type=int, default=None, metavar='N', help='number of epochs to train (default: auto)')
parser.add_argument('--start_epoch', type=int, default=0, metavar='N', help='start epochs (default:0)')
parser.add_argument('--filter_multiplier', type=int, default=8)
parser.add_argument('--block_multiplier', type=int, default=5)
parser.add_argument('--step', type=int, default=5)
parser.add_argument('--alpha_epoch', type=int, default=20, metavar='N', help='epoch to start training alphas')
parser.add_argument('--batch-size', type=int, default=8, metavar='N', help='input batch size for training (default: auto)')
parser.add_argument('--test-batch-size', type=int, default=None, metavar='N', help='input batch size for testing (default: auto)')
parser.add_argument('--use_balanced_weights', action='store_true', default=False, help='whether to use balanced weights (default: False)')
parser.add_argument('--lr', type=float, default=0.025, metavar='LR', help='learning rate (default: auto)')
parser.add_argument('--min_lr', type=float, default=0.001)
parser.add_argument('--arch-lr', type=float, default=0.003, metavar='LR', help='learning rate for alpha and beta in architect searching process')
parser.add_argument('--lr-scheduler', type=str, default='cos', choices=['poly', 'step', 'cos'], help='lr scheduler mode')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.0003, metavar='M', help='w-decay (default: 3e-4)')
    parser.add_argument('--arch-weight-decay', type=float, default=0.001, metavar='M', help='w-decay (default: 1e-3)')
parser.add_argument('--nesterov', action='store_true', default=False, help='whether use nesterov (default: False)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--use_amp', action='store_true', default=False)
parser.add_argument('--gpu-ids', type=str, default='0', help='use which gpu to train, must be a comma-separated list of integers only (default=0)')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--resume', type=str, default=None, help='put the path to resuming file if needed')
parser.add_argument('--checkname', type=str, default=None, help='set the checkpoint name')
parser.add_argument('--ft', action='store_true', default=False, help='finetuning on a different dataset')
    parser.add_argument('--eval-interval', type=int, default=1, help='evaluation interval (default: 1)')
parser.add_argument('--no-val', action='store_true', default=False, help='skip validation during training')
parser.add_argument('--affine', default=False, type=bool, help='whether use affine in BN')
parser.add_argument('--multi_scale', default=(0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0), type=bool, help='whether use multi_scale in train')
args = parser.parse_args()
return args
|
class Loader(object):
def __init__(self, args):
self.args = args
self.args.nclass = 1
self.best_pred = 0.0
assert (args.resume is not None), RuntimeError("No model to decode in resume path: '{:}'".format(args.resume))
assert os.path.isfile(args.resume), RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
self._alphas = checkpoint['state_dict']['alphas']
self._betas = checkpoint['state_dict']['betas']
self.decoder = Decoder(alphas=self._alphas, betas=self._betas, steps=5)
def retreive_alphas_betas(self):
return (self._alphas, self._betas)
def decode_architecture(self):
(paths, paths_space) = self.decoder.viterbi_decode()
return (paths, paths_space)
def decode_cell(self):
genotype = self.decoder.genotype_decode()
return genotype
|
def get_new_network_cell():
args = obtain_decode_args()
args.cuda = ((not args.no_cuda) and torch.cuda.is_available())
load_model = Loader(args)
(result_paths, result_paths_space) = load_model.decode_architecture()
network_path = result_paths
network_path_space = result_paths_space
genotype = load_model.decode_cell()
print('architecture search results:', network_path)
print('new cell structure:', genotype)
dir_name = os.path.dirname(args.resume)
network_path_filename = os.path.join(dir_name, 'network_path')
network_path_space_filename = os.path.join(dir_name, 'network_path_space')
genotype_filename = os.path.join(dir_name, 'genotype')
np.save(network_path_filename, network_path)
np.save(network_path_space_filename, network_path_space)
np.save(genotype_filename, genotype)
print('saved to :', dir_name)
|
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, dilation=dilation, padding=dilation, bias=False)
self.bn2 = BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, (planes * 4), kernel_size=1, bias=False)
self.bn3 = BatchNorm2d((planes * 4))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
class ResNet(nn.Module):
def __init__(self, nInputChannels, block, layers, os=16, pretrained=False):
self.inplanes = 64
super(ResNet, self).__init__()
if (os == 16):
strides = [1, 2, 2, 1]
dilations = [1, 1, 1, 2]
blocks = [1, 2, 4]
elif (os == 8):
strides = [1, 2, 1, 1]
dilations = [1, 1, 2, 2]
blocks = [1, 2, 1]
else:
raise NotImplementedError
self.conv1 = nn.Conv2d(nInputChannels, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2])
self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3])
self._init_weight()
if pretrained:
self._load_pretrained_model()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm2d((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _make_MG_unit(self, block, planes, blocks=[1, 2, 4], stride=1, dilation=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm2d((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, dilation=(blocks[0] * dilation), downsample=downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, len(blocks)):
layers.append(block(self.inplanes, planes, stride=1, dilation=(blocks[i] * dilation)))
return nn.Sequential(*layers)
def forward(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
low_level_feat = x
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return (x, low_level_feat)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_pretrained_model(self):
pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/resnet101-5d3b4d8f.pth')
model_dict = {}
state_dict = self.state_dict()
for (k, v) in pretrain_dict.items():
if (k in state_dict):
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
|
def ResNet101(nInputChannels=3, os=16, pretrained=False):
model = ResNet(nInputChannels, Bottleneck, [3, 4, 23, 3], os, pretrained=pretrained)
return model
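# A small forward-pass sketch (an assumption, not original code): with os=16 the
# backbone returns a 1/16-resolution feature map with 2048 channels together with
# the 1/4-resolution low-level features (256 channels) taken after layer1.
def example_resnet_backbone():
    backbone = ResNet101(nInputChannels=3, os=16, pretrained=False)
    x = torch.randn(2, 3, 512, 512)
    (feat, low_level) = backbone(x)
    # feat:      (2, 2048, 32, 32)   i.e. 512 / 16
    # low_level: (2, 256, 128, 128)  i.e. 512 / 4
    return (feat.shape, low_level.shape)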
|
class ASPP_module(nn.Module):
def __init__(self, inplanes, planes, dilation):
super(ASPP_module, self).__init__()
if (dilation == 1):
kernel_size = 1
padding = 0
else:
kernel_size = 3
padding = dilation
self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = BatchNorm2d(planes)
self.relu = nn.ReLU()
self._init_weight()
def forward(self, x):
x = self.atrous_convolution(x)
x = self.bn(x)
return self.relu(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
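# Illustrative note (not original code): with padding equal to the dilation rate, a
# 3x3 atrous convolution preserves the spatial size of its input, which is what lets
# the parallel ASPP branches be concatenated along the channel dimension.
def example_aspp_branch():
    branch = ASPP_module(inplanes=2048, planes=256, dilation=12)  # 3x3 conv, padding=12
    x = torch.randn(2, 2048, 33, 33)
    return branch(x).shape  # (2, 256, 33, 33)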
|
class DeepLabv3_plus(nn.Module):
def __init__(self, nInputChannels=3, n_classes=21, os=16, pretrained=False, freeze_bn=False, _print=True):
if _print:
print('Constructing DeepLabv3+ model...')
print('Backbone: Resnet-101')
print('Number of classes: {}'.format(n_classes))
print('Output stride: {}'.format(os))
print('Number of Input Channels: {}'.format(nInputChannels))
super(DeepLabv3_plus, self).__init__()
self.resnet_features = ResNet101(nInputChannels, os, pretrained=pretrained)
if (os == 16):
dilations = [1, 6, 12, 18]
elif (os == 8):
dilations = [1, 12, 24, 36]
else:
raise NotImplementedError
self.aspp1 = ASPP_module(2048, 256, dilation=dilations[0])
self.aspp2 = ASPP_module(2048, 256, dilation=dilations[1])
self.aspp3 = ASPP_module(2048, 256, dilation=dilations[2])
self.aspp4 = ASPP_module(2048, 256, dilation=dilations[3])
self.relu = nn.ReLU()
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Conv2d(2048, 256, 1, stride=1, bias=False), BatchNorm2d(256), nn.ReLU())
self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
self.bn1 = BatchNorm2d(256)
self.conv2 = nn.Conv2d(256, 48, 1, bias=False)
self.bn2 = BatchNorm2d(48)
self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False), BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False), BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, n_classes, kernel_size=1, stride=1))
if freeze_bn:
self._freeze_bn()
def forward(self, input):
(x, low_level_features) = self.resnet_features(input)
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
        x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
        x = F.interpolate(x, size=(int(math.ceil((input.size()[(- 2)] / 4))), int(math.ceil((input.size()[(- 1)] / 4)))), mode='bilinear', align_corners=True)
low_level_features = self.conv2(low_level_features)
low_level_features = self.bn2(low_level_features)
low_level_features = self.relu(low_level_features)
x = torch.cat((x, low_level_features), dim=1)
x = self.last_conv(x)
x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
return x
def _freeze_bn(self):
for m in self.modules():
if isinstance(m, BatchNorm2d):
m.eval()
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
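# A minimal end-to-end sketch (an assumption, not original code): the decoder
# upsamples the logits back to the input resolution, so the output can be compared
# per pixel against a label map of the same height and width.
def example_deeplab_forward():
    model = DeepLabv3_plus(nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=False)
    x = torch.randn(2, 3, 257, 257)
    logits = model(x)
    return logits.shape  # (2, 21, 257, 257)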
|
def get_1x_lr_params(model):
'\n This generator returns all the parameters of the net except for\n the last classification layer. Note that for each batchnorm layer,\n requires_grad is set to False in deeplab_resnet.py, therefore this function does not return\n any batchnorm parameter\n '
b = [model.resnet_features]
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
(yield k)
|
def get_10x_lr_params(model):
'\n This generator returns all the parameters for the last layer of the net,\n which does the classification of pixel into classes\n '
b = [model.aspp1, model.aspp2, model.aspp3, model.aspp4, model.conv1, model.conv2, model.last_conv]
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
(yield k)
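# Usage sketch (an assumption): the two generators above are intended to be combined
# into one optimizer with a 10x larger learning rate on the freshly initialized
# ASPP/decoder layers than on the (optionally pretrained) backbone.
def example_two_lr_optimizer(model, base_lr=0.007, momentum=0.9, weight_decay=0.0005):
    return torch.optim.SGD(
        [{'params': get_1x_lr_params(model), 'lr': base_lr},
         {'params': get_10x_lr_params(model), 'lr': 10 * base_lr}],
        momentum=momentum, weight_decay=weight_decay)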
|
class SeparableConv2d(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=0, dilation=1, bias=False):
        super(SeparableConv2d, self).__init__()
self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, groups=inplanes, bias=bias)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = self.conv1(x)
x = self.pointwise(x)
return x
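# Worked example (illustrative): a depthwise-separable 3x3 convolution factorizes a
# standard convolution into a per-channel spatial conv plus a 1x1 pointwise mix.
# For 256 -> 256 channels that is 256*3*3 + 256*256 = 67,840 weights, versus
# 256*256*3*3 = 589,824 for a standard 3x3 convolution (roughly 8.7x fewer).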
|
def fixed_padding(inputs, kernel_size, dilation):
kernel_size_effective = (kernel_size + ((kernel_size - 1) * (dilation - 1)))
pad_total = (kernel_size_effective - 1)
pad_beg = (pad_total // 2)
pad_end = (pad_total - pad_beg)
padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
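# Worked example (illustrative): fixed_padding reproduces TensorFlow-style 'SAME'
# padding for stride-1 atrous convolutions. For kernel_size=3 and dilation=2 the
# effective kernel spans 3 + (3 - 1) * (2 - 1) = 5 positions, so pad_total = 4 and
# the input is padded by 2 on each side, leaving the spatial size unchanged.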
|
class SeparableConv2d_same(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False):
super(SeparableConv2d_same, self).__init__()
self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation, groups=inplanes, bias=bias)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = fixed_padding(x, self.conv1.kernel_size[0], dilation=self.conv1.dilation[0])
x = self.conv1(x)
x = self.pointwise(x)
return x
|
class Block(nn.Module):
def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False):
super(Block, self).__init__()
if ((planes != inplanes) or (stride != 1)):
self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
self.skipbn = BatchNorm2d(planes)
else:
self.skip = None
self.relu = nn.ReLU(inplace=True)
rep = []
filters = inplanes
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
rep.append(BatchNorm2d(planes))
filters = planes
for i in range((reps - 1)):
rep.append(self.relu)
rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation))
rep.append(BatchNorm2d(filters))
if (not grow_first):
rep.append(self.relu)
rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
rep.append(BatchNorm2d(planes))
if (not start_with_relu):
rep = rep[1:]
if (stride != 1):
rep.append(SeparableConv2d_same(planes, planes, 3, stride=2))
if ((stride == 1) and is_last):
rep.append(SeparableConv2d_same(planes, planes, 3, stride=1))
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
if (self.skip is not None):
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x += skip
return x
|
class Xception(nn.Module):
    '\n Modified Aligned Xception\n '
def __init__(self, inplanes=3, os=16, pretrained=False):
super(Xception, self).__init__()
if (os == 16):
entry_block3_stride = 2
middle_block_dilation = 1
exit_block_dilations = (1, 2)
elif (os == 8):
entry_block3_stride = 1
middle_block_dilation = 2
exit_block_dilations = (2, 4)
else:
raise NotImplementedError
self.conv1 = nn.Conv2d(inplanes, 32, 3, stride=2, padding=1, bias=False)
self.bn1 = BatchNorm2d(32)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
self.bn2 = BatchNorm2d(64)
self.block1 = Block(64, 128, reps=2, stride=2, start_with_relu=False)
self.block2 = Block(128, 256, reps=2, stride=2, start_with_relu=True, grow_first=True)
self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, start_with_relu=True, grow_first=True, is_last=True)
self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True, grow_first=True)
self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_dilations[0], start_with_relu=True, grow_first=False, is_last=True)
self.conv3 = SeparableConv2d_same(1024, 1536, 3, stride=1, dilation=exit_block_dilations[1])
self.bn3 = BatchNorm2d(1536)
self.conv4 = SeparableConv2d_same(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1])
self.bn4 = BatchNorm2d(1536)
self.conv5 = SeparableConv2d_same(1536, 2048, 3, stride=1, dilation=exit_block_dilations[1])
self.bn5 = BatchNorm2d(2048)
self._init_weight()
if pretrained:
self._load_xception_pretrained()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
low_level_feat = x
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.block13(x)
x = self.block14(x)
x = self.block15(x)
x = self.block16(x)
x = self.block17(x)
x = self.block18(x)
x = self.block19(x)
x = self.block20(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu(x)
return (x, low_level_feat)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_xception_pretrained(self):
pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
model_dict = {}
state_dict = self.state_dict()
for (k, v) in pretrain_dict.items():
            if (k in state_dict):
if ('pointwise' in k):
v = v.unsqueeze((- 1)).unsqueeze((- 1))
if k.startswith('block11'):
model_dict[k] = v
model_dict[k.replace('block11', 'block12')] = v
model_dict[k.replace('block11', 'block13')] = v
model_dict[k.replace('block11', 'block14')] = v
model_dict[k.replace('block11', 'block15')] = v
model_dict[k.replace('block11', 'block16')] = v
model_dict[k.replace('block11', 'block17')] = v
model_dict[k.replace('block11', 'block18')] = v
model_dict[k.replace('block11', 'block19')] = v
elif k.startswith('block12'):
model_dict[k.replace('block12', 'block20')] = v
elif k.startswith('bn3'):
model_dict[k] = v
model_dict[k.replace('bn3', 'bn4')] = v
elif k.startswith('conv4'):
model_dict[k.replace('conv4', 'conv5')] = v
elif k.startswith('bn4'):
model_dict[k.replace('bn4', 'bn5')] = v
else:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
|
class ASPP_module(nn.Module):
def __init__(self, inplanes, planes, dilation):
super(ASPP_module, self).__init__()
if (dilation == 1):
kernel_size = 1
padding = 0
else:
kernel_size = 3
padding = dilation
self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = BatchNorm2d(planes)
self.relu = nn.ReLU()
self._init_weight()
def forward(self, x):
x = self.atrous_convolution(x)
x = self.bn(x)
return self.relu(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
|
class DeepLabv3_plus(nn.Module):
def __init__(self, nInputChannels=3, n_classes=21, os=16, pretrained=False, freeze_bn=False, _print=True):
if _print:
print('Constructing DeepLabv3+ model...')
print('Backbone: Xception')
print('Number of classes: {}'.format(n_classes))
print('Output stride: {}'.format(os))
print('Number of Input Channels: {}'.format(nInputChannels))
super(DeepLabv3_plus, self).__init__()
self.xception_features = Xception(nInputChannels, os, pretrained)
if (os == 16):
dilations = [1, 6, 12, 18]
elif (os == 8):
dilations = [1, 12, 24, 36]
else:
raise NotImplementedError
self.aspp1 = ASPP_module(2048, 256, dilation=dilations[0])
self.aspp2 = ASPP_module(2048, 256, dilation=dilations[1])
self.aspp3 = ASPP_module(2048, 256, dilation=dilations[2])
self.aspp4 = ASPP_module(2048, 256, dilation=dilations[3])
self.relu = nn.ReLU()
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Conv2d(2048, 256, 1, stride=1, bias=False), BatchNorm2d(256), nn.ReLU())
self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
self.bn1 = BatchNorm2d(256)
self.conv2 = nn.Conv2d(128, 48, 1, bias=False)
self.bn2 = BatchNorm2d(48)
self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False), BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False), BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, n_classes, kernel_size=1, stride=1))
if freeze_bn:
self._freeze_bn()
def forward(self, input):
(x, low_level_features) = self.xception_features(input)
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = F.interpolate(x, size=(int(math.ceil((input.size()[(- 2)] / 4))), int(math.ceil((input.size()[(- 1)] / 4)))), mode='bilinear', align_corners=True)
low_level_features = self.conv2(low_level_features)
low_level_features = self.bn2(low_level_features)
low_level_features = self.relu(low_level_features)
x = torch.cat((x, low_level_features), dim=1)
x = self.last_conv(x)
x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
return x
def _freeze_bn(self):
for m in self.modules():
if isinstance(m, BatchNorm2d):
m.eval()
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
|
def get_1x_lr_params(model):
'\n This generator returns all the parameters of the net except for\n the last classification layer. Note that for each batchnorm layer,\n requires_grad is set to False in deeplab_resnet.py, therefore this function does not return\n any batchnorm parameter\n '
b = [model.xception_features]
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
(yield k)
|
def get_10x_lr_params(model):
'\n This generator returns all the parameters for the last layer of the net,\n which does the classification of pixel into classes\n '
b = [model.aspp1, model.aspp2, model.aspp3, model.aspp4, model.conv1, model.conv2, model.last_conv]
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
(yield k)
|
def download_data_from_s3(task):
'Download pde data from s3 to store in temp directory'
s3_base = 'https://pde-xd.s3.amazonaws.com'
download_directory = '.'
if (task == 'darcyflow'):
data_files = ['piececonst_r421_N1024_smooth1.mat', 'piececonst_r421_N1024_smooth2.mat']
s3_path = None
elif (task == 'protein'):
data_files = ['protein.zip']
s3_path = None
elif (task == 'cosmic'):
data_files = ['deepCR.ACS-WFC.train.tar', 'deepCR.ACS-WFC.test.tar']
s3_path = 'cosmic'
else:
raise NotImplementedError
for data_file in data_files:
if (not os.path.exists(data_file)):
if (s3_path is not None):
fileurl = ((((s3_base + '/') + s3_path) + '/') + data_file)
else:
fileurl = ((s3_base + '/') + data_file)
urlretrieve(fileurl, data_file)
return None
|
def main():
task = sys.argv[1]
download_data_from_s3(task)
|
def main(start_epoch, epochs):
assert torch.cuda.is_available(), NotImplementedError('No cuda available ')
if (not osp.exists('data/')):
os.mkdir('data/')
if (not osp.exists('log/')):
os.mkdir('log/')
args = obtain_evaluate_args()
torch.backends.cudnn.benchmark = True
model_fname = 'data/deeplab_{0}_{1}_v3_{2}_epoch%d.pth'.format(args.backbone, args.dataset, args.exp)
if (args.dataset == 'cityscapes'):
dataset = CityscapesSegmentation(args=args, root=Path.db_root_dir(args.dataset), split='reval')
else:
        raise NotImplementedError
if (args.backbone == 'autodeeplab'):
model = Retrain_Autodeeplab(args)
else:
raise ValueError('Unknown backbone: {}'.format(args.backbone))
if (not args.train):
val_dataloader = DataLoader(dataset, batch_size=16, shuffle=False)
model = torch.nn.DataParallel(model).cuda()
print('======================start evaluate=======================')
for epoch in range(epochs):
print('evaluate epoch {:}'.format((epoch + start_epoch)))
checkpoint_name = (model_fname % (epoch + start_epoch))
print(checkpoint_name)
checkpoint = torch.load(checkpoint_name)
state_dict = {k[7:]: v for (k, v) in checkpoint['state_dict'].items() if ('tracked' not in k)}
model.module.load_state_dict(state_dict)
inter_meter = AverageMeter()
union_meter = AverageMeter()
for (i, sample) in enumerate(val_dataloader):
(inputs, target) = (sample['image'], sample['label'])
(N, H, W) = target.shape
total_outputs = torch.zeros((N, dataset.NUM_CLASSES, H, W)).cuda()
with torch.no_grad():
for (j, scale) in enumerate(args.eval_scales):
new_scale = [int((H * scale)), int((W * scale))]
                    inputs = F.interpolate(inputs, new_scale, mode='bilinear', align_corners=True)
inputs = inputs.cuda()
outputs = model(inputs)
                    outputs = F.interpolate(outputs, (H, W), mode='bilinear', align_corners=True)
total_outputs += outputs
(_, pred) = torch.max(total_outputs, 1)
pred = pred.detach().cpu().numpy().squeeze().astype(np.uint8)
mask = target.numpy().astype(np.uint8)
print('eval: {0}/{1}'.format((i + 1), len(val_dataloader)))
(inter, union) = inter_and_union(pred, mask, len(dataset.CLASSES))
inter_meter.update(inter)
union_meter.update(union)
iou = (inter_meter.sum / (union_meter.sum + 1e-10))
miou = 'epoch: {0} Mean IoU: {1:.2f}'.format(epoch, (iou.mean() * 100))
f = open('log/result.txt', 'a')
for (i, val) in enumerate(iou):
class_iou = 'IoU {0}: {1:.2f}\n'.format(dataset.CLASSES[i], (val * 100))
f.write(class_iou)
f.write('\n')
f.write(miou)
f.write('\n')
f.close()
|
def SeparateConv(C_in, C_out, kernel_size, stride, padding, dilation, bias, BatchNorm):
return nn.Sequential(nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, groups=C_in, bias=False), BatchNorm(C_in), nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False))
|
class _ASPPModule(nn.Module):
def __init__(self, inplanes, planes, kernel_size, padding, dilation, BatchNorm, separate=False):
super(_ASPPModule, self).__init__()
if separate:
self.atrous_conv = SeparateConv(inplanes, planes, kernel_size, 1, padding, dilation, False, BatchNorm)
else:
self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = BatchNorm(planes)
self._init_weight()
def forward(self, x):
x = self.atrous_conv(x)
x = self.bn(x)
return x
    def _init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
if (not (ly.bias is None)):
nn.init.constant_(ly.bias, 0)
|
class ASPP_train(nn.Module):
def __init__(self, backbone, output_stride, filter_multiplier=20, steps=5, BatchNorm=ABN, separate=False):
super(ASPP_train, self).__init__()
if (backbone == 'drn'):
inplanes = 512
elif (backbone == 'mobilenet'):
inplanes = 320
elif (backbone == 'autodeeplab'):
inplanes = int(((filter_multiplier * steps) * (output_stride / 4)))
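            # e.g. with the defaults filter_multiplier=20, steps=5 and output_stride=16
            # this gives 20 * 5 * (16 / 4) = 400 input channels for the ASPP head.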
else:
inplanes = 2048
if (output_stride == 16):
dilations = [1, 6, 12, 18]
elif (output_stride == 8):
dilations = [1, 12, 24, 36]
else:
raise NotImplementedError
self.aspp1 = _ASPPModule(inplanes, 256, 1, padding=0, dilation=dilations[0], BatchNorm=BatchNorm)
self.aspp2 = _ASPPModule(inplanes, 256, 3, padding=dilations[1], dilation=dilations[1], BatchNorm=BatchNorm, separate=separate)
self.aspp3 = _ASPPModule(inplanes, 256, 3, padding=dilations[2], dilation=dilations[2], BatchNorm=BatchNorm, separate=separate)
self.aspp4 = _ASPPModule(inplanes, 256, 3, padding=dilations[3], dilation=dilations[3], BatchNorm=BatchNorm, separate=separate)
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Conv2d(inplanes, 256, 1, stride=1, bias=False), BatchNorm(256))
self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
self.bn1 = BatchNorm(256)
self.dropout = nn.Dropout(0.5)
self._init_weight()
def forward(self, x):
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
x5 = F.interpolate(x5, size=x1.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
return self.dropout(x)
    def _init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
if (not (ly.bias is None)):
nn.init.constant_(ly.bias, 0)
|
def build_aspp(backbone, output_stride, BatchNorm, args, separate):
return ASPP_train(backbone, output_stride, args.filter_multiplier, 5, BatchNorm, separate)
|
def build_backbone(backbone, output_stride, BatchNorm, args):
if (backbone == 'resnet'):
return resnet.ResNet101(output_stride, BatchNorm)
elif (backbone == 'xception'):
return xception.AlignedXception(output_stride, BatchNorm)
elif (backbone == 'drn'):
return drn.drn_d_54(BatchNorm)
elif (backbone == 'mobilenet'):
return mobilenet.MobileNetV2(output_stride, BatchNorm)
elif (backbone == 'autodeeplab'):
return get_default_net(filter_multiplier=args.filter_multiplier)
else:
raise NotImplementedError
|
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=padding, bias=False, dilation=dilation)
|
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=(1, 1), residual=True, BatchNorm=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride, padding=dilation[0], dilation=dilation[0])
self.bn1 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, padding=dilation[1], dilation=dilation[1])
self.bn2 = BatchNorm(planes)
self.downsample = downsample
self.stride = stride
self.residual = residual
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if (self.downsample is not None):
residual = self.downsample(x)
if self.residual:
out += residual
out = self.relu(out)
return out
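# Hedged sketch (not in the original source): a strided or channel-changing BasicBlock
# needs a matching `downsample` branch so `out += residual` lines up; DRN's arch-'C'
# layers 7/8 instead pass residual=False and fall back to a plain conv stack.
def _demo_basic_block():
down = nn.Sequential(nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(128))
blk = BasicBlock(64, 128, stride=2, downsample=down, BatchNorm=nn.BatchNorm2d)
y = blk(torch.randn(1, 64, 56, 56))  # expected shape: (1, 128, 28, 28)
return y.shape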
|
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=(1, 1), residual=True, BatchNorm=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=dilation[1], bias=False, dilation=dilation[1])
self.bn2 = BatchNorm(planes)
self.conv3 = nn.Conv2d(planes, (planes * 4), kernel_size=1, bias=False)
self.bn3 = BatchNorm((planes * 4))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
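# Bottleneck follows the standard ResNet layout (1x1 reduce, 3x3, 1x1 expand by
# `expansion` = 4); only the middle conv carries the stride and the dilation[1] rate.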
|
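# Dilated Residual Network trunk. Arch 'C' builds the stem from residual BasicBlocks,
# arch 'D' from plain conv/BN/ReLU stacks; layers 0-1 keep full resolution, layers 2-4
# each halve it (output stride 8), and layers 5-8 replace further striding with dilation
# rates of 2, 4, then 2 and 1. The layer3 output (stride 4) is returned as low_level_feat
# for a decoder.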
class DRN(nn.Module):
def __init__(self, block, layers, arch='D', channels=(16, 32, 64, 128, 256, 512, 512, 512), BatchNorm=None):
super(DRN, self).__init__()
self.inplanes = channels[0]
self.out_dim = channels[(- 1)]
self.arch = arch
if (arch == 'C'):
self.conv1 = nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3, bias=False)
self.bn1 = BatchNorm(channels[0])
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(BasicBlock, channels[0], layers[0], stride=1, BatchNorm=BatchNorm)
self.layer2 = self._make_layer(BasicBlock, channels[1], layers[1], stride=2, BatchNorm=BatchNorm)
elif (arch == 'D'):
self.layer0 = nn.Sequential(nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3, bias=False), BatchNorm(channels[0]), nn.ReLU(inplace=True))
self.layer1 = self._make_conv_layers(channels[0], layers[0], stride=1, BatchNorm=BatchNorm)
self.layer2 = self._make_conv_layers(channels[1], layers[1], stride=2, BatchNorm=BatchNorm)
self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2, BatchNorm=BatchNorm)
self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2, BatchNorm=BatchNorm)
self.layer5 = self._make_layer(block, channels[4], layers[4], dilation=2, new_level=False, BatchNorm=BatchNorm)
self.layer6 = (None if (layers[5] == 0) else self._make_layer(block, channels[5], layers[5], dilation=4, new_level=False, BatchNorm=BatchNorm))
if (arch == 'C'):
self.layer7 = (None if (layers[6] == 0) else self._make_layer(BasicBlock, channels[6], layers[6], dilation=2, new_level=False, residual=False, BatchNorm=BatchNorm))
self.layer8 = (None if (layers[7] == 0) else self._make_layer(BasicBlock, channels[7], layers[7], dilation=1, new_level=False, residual=False, BatchNorm=BatchNorm))
elif (arch == 'D'):
self.layer7 = (None if (layers[6] == 0) else self._make_conv_layers(channels[6], layers[6], dilation=2, BatchNorm=BatchNorm))
self.layer8 = (None if (layers[7] == 0) else self._make_conv_layers(channels[7], layers[7], dilation=1, BatchNorm=BatchNorm))
self._init_weight()
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, new_level=True, residual=True, BatchNorm=None):
assert ((dilation == 1) or ((dilation % 2) == 0))
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm((planes * block.expansion)))
layers = list()
layers.append(block(self.inplanes, planes, stride, downsample, dilation=((1, 1) if (dilation == 1) else (((dilation // 2) if new_level else dilation), dilation)), residual=residual, BatchNorm=BatchNorm))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, residual=residual, dilation=(dilation, dilation), BatchNorm=BatchNorm))
return nn.Sequential(*layers)
def _make_conv_layers(self, channels, convs, stride=1, dilation=1, BatchNorm=None):
modules = []
for i in range(convs):
modules.extend([nn.Conv2d(self.inplanes, channels, kernel_size=3, stride=(stride if (i == 0) else 1), padding=dilation, bias=False, dilation=dilation), BatchNorm(channels), nn.ReLU(inplace=True)])
self.inplanes = channels
return nn.Sequential(*modules)
def forward(self, x):
if (self.arch == 'C'):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
elif (self.arch == 'D'):
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
low_level_feat = x
x = self.layer4(x)
x = self.layer5(x)
if (self.layer6 is not None):
x = self.layer6(x)
if (self.layer7 is not None):
x = self.layer7(x)
if (self.layer8 is not None):
x = self.layer8(x)
return (x, low_level_feat)
|
class DRN_A(nn.Module):
def __init__(self, block, layers, BatchNorm=None):
self.inplanes = 64
super(DRN_A, self).__init__()
self.out_dim = (512 * block.expansion)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = BatchNorm(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], BatchNorm=BatchNorm)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, BatchNorm=BatchNorm)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, BatchNorm=BatchNorm)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, BatchNorm=BatchNorm)
self._init_weight()
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, BatchNorm=BatchNorm))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=(dilation, dilation), BatchNorm=BatchNorm))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
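# DRN_A is the 'A' variant: a standard ResNet stem (7x7 conv + maxpool) with layers 3-4
# dilated instead of strided, so it returns only the final feature map (no low-level
# features). With Bottleneck [3, 4, 6, 3] it keeps the ResNet-50 weight layout, which is
# why drn_a_50 below can load resnet50 weights.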
|
def drn_a_50(BatchNorm, pretrained=True):
model = DRN_A(Bottleneck, [3, 4, 6, 3], BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['resnet50'])
# DRN_A has no classifier head, so drop the checkpoint's fc entries before the strict
# load, matching the other factories below.
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
|
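# Each factory below loads ImageNet weights from model_zoo and drops the checkpoint's
# fc.weight / fc.bias entries, since the classifier head is not part of the backbone here.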
def drn_c_26(BatchNorm, pretrained=True):
model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='C', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-c-26'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
|
def drn_c_42(BatchNorm, pretrained=True):
model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-c-42'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
|
def drn_c_58(BatchNorm, pretrained=True):
model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-c-58'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
|
def drn_d_22(BatchNorm, pretrained=True):
model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='D', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-d-22'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
|
def drn_d_24(BatchNorm, pretrained=True):
model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 2, 2], arch='D', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-d-24'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
|
def drn_d_38(BatchNorm, pretrained=True):
model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', BatchNorm=BatchNorm)
if pretrained:
pretrained = model_zoo.load_url(model_urls['drn-d-38'])
del pretrained['fc.weight']
del pretrained['fc.bias']
model.load_state_dict(pretrained)
return model
|
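# Hedged usage sketch (not part of the original source): instantiate DRN-D-38 without
# downloading pretrained weights and inspect the two outputs a DeepLab-style decoder consumes.
def _demo_drn_d_38():
model = drn_d_38(nn.BatchNorm2d, pretrained=False)
x = torch.randn(1, 3, 224, 224)
feat, low_level = model(x)
# expected: feat (1, 512, 28, 28) at output stride 8, low_level (1, 64, 56, 56) at stride 4
return feat.shape, low_level.shape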