def train_SN(args):
(data_train, data_dev, data_test) = (load_data(args, 'train'), load_data(args, 'dev'), load_data(args, 'test'))
if args.noise_label:
data_noise = load_data(args, 'test_noise')
else:
data_noise = None
args.label_size = len(data_test.relId2labelId)
args.ner_label_size = len(data_test.ner2id)
trainer = Trainer(args, data_train, data_dev, data_test, data_noise)
if (args.mode == 'train'):
trainer.train_batch()
trainer.test_batch()
elif (args.mode == 'test'):
try:
trainer.load_model(args.load_model_name)
print('Loading model from {}'.format(args.load_model_name))
except Exception:
trainer.load_model(args.save_model_name)
print('Loading model from {}'.format(args.save_model_name))
eval_result = trainer.test_batch(data='dev')
eval_result = trainer.test_batch()
|
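A minimal, hypothetical driver for train_SN. The flag names mirror the attributes the code above reads (args.mode, args.noise_label, args.batch_size, args.gpu, args.lr, and so on); the defaults are illustrative assumptions, not the repository's actual CLI.

import argparse

def build_args():
    # Every flag below is inferred from attribute accesses in train_SN/Trainer;
    # the default values are placeholders.
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', default='train', choices=['train', 'test'])
    parser.add_argument('--save_data_path', default='data/processed')
    parser.add_argument('--noise_label', action='store_true')
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--gpu', default='cuda:0')
    parser.add_argument('--n_gpu', type=int, default=1)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--weight_decay', type=float, default=1e-5)
    parser.add_argument('--word2vec_file', default='data/word2vec.txt')
    parser.add_argument('--save_model_name', default='checkpoints/sent')
    parser.add_argument('--load_model_name', default='checkpoints/sent-P')
    return parser.parse_args()

if __name__ == '__main__':
    train_SN(build_args())
|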
class Trainer(nn.Module):
def __init__(self, options, data_train, data_dev, data_test, data_noise):
super(Trainer, self).__init__()
self.options = options
self.batch_size = options.batch_size
self.save_path = options.save_model_name
self.device = options.gpu
self.n_device = options.n_gpu
self.test_noise = options.noise_label
self.n_initial_epoch = 15
self.n_iter_epoch = 10
self.n_iter_num = 15
self.n_posi_epoch = 15
self.options.random = False
if (data_train is not None):
self.data_train = data_train
self.id2label = data_train.labelId2rel
train_batch_data = data_train.batchify()
self.ori_train_labels = [d[(- 1)].item() for d in train_batch_data]
self.train_dl = DataLoader(train_batch_data, sampler=RandomSampler(train_batch_data), batch_size=options.batch_size)
if self.test_noise:
self.data_noise = data_noise
noise_batch_data = data_noise.batchify(noise_label=True)
for idx in range(len(noise_batch_data)):
if ((noise_batch_data[idx][(- 2)] == 0) and noise_batch_data[idx][(- 1)]):
noise_batch_data[idx][(- 1)] = False
self.test_noise_dl = DataLoader(noise_batch_data, sampler=SequentialSampler(noise_batch_data), batch_size=options.batch_size)
self.pred_noise = ([0] * len(noise_batch_data))
self.data_dev = data_dev
self.data_test = data_test
self.eval_interval = 10
test_batch_data = data_test.batchify()
dev_batch_data = data_dev.batchify()
self.test_dl = DataLoader(test_batch_data, sampler=SequentialSampler(test_batch_data), batch_size=options.batch_size)
self.dev_dl = DataLoader(dev_batch_data, sampler=SequentialSampler(dev_batch_data), batch_size=options.batch_size)
self.initialize()
def initialize(self):
torch.manual_seed(1)
print('Initializing model...')
self.options.vocab_size = self.data_test.get_vocab_size()
self.SENTmodel = SENT_Model(self.options, vocab_file=self.options.word2vec_file)
if (self.n_device > 1):
self.SENTmodel = torch.nn.DataParallel(self.SENTmodel)
if self.device:
self.SENTmodel = self.SENTmodel.to(self.device)
weight_decay = self.options.weight_decay
self.para_list = [p for p in self.SENTmodel.parameters() if p.requires_grad]
self.optimizer = torch.optim.Adam(self.para_list, lr=self.options.lr, weight_decay=weight_decay)
def train_batch(self):
self.options.random = False
print('------------ Start Initial Training -------------')
self.save_path = (self.save_path + '-N0')
self.train_epoch(self.n_initial_epoch, negloss=True, metric='train')
print('------------- Start Iterative Training -------------')
for iter in range(self.n_iter_num):
print('Iterative Training Phase {}'.format((iter + 1)))
self.filter_relabel(prob_threshold=0.25, cutrate=0.01, relabel_rate=0.7)
self.test_denoise(prob_threshold=0.25, cutrate=0.01)
self.initialize()
self.save_path = ('-'.join(self.save_path.split('-')[:(- 1)]) + f'-N{(iter + 1)}')
self.train_epoch(self.n_iter_epoch, negloss=True, metric='dev')
print('------------- Start Positive Training -------------')
self.filter_relabel(0.25, cutrate=0.0, relabel_rate=0.7)
self.test_denoise(0.25, cutrate=0.0)
self.options.random = True
self.initialize()
print('Start Positive Training')
self.save_path = ('-'.join(self.save_path.split('-')[:(- 1)]) + '-P')
self.train_epoch(self.n_posi_epoch, negloss=False, metric='dev', test=True)
def train_epoch(self, epoch_num, negloss=False, test=False, metric='dev'):
best_metric = (- 1)
for epoch in range(epoch_num):
total_loss = 0.0
all_right = 0.0
all_total = 0.0
all_pos_right = 0.0
all_pos_total = 0.0
idx = 0
predictions = []
true_labels = []
print('------epoch {}/{}------'.format((epoch + 1), epoch_num))
for (i, train_batch) in tqdm(enumerate(self.train_dl)):
self.SENTmodel.train()
idx += train_batch[0].size(0)
train_batch = [t.to(self.device) for t in train_batch]
(loss, preds, right, total, pos_right, probs, _) = self.SENTmodel(train_batch, negloss=negloss)
loss = loss.mean()
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
total_loss += loss.item()
all_right += right.sum().item()
all_total += total.sum().item()
all_pos_right += pos_right.sum().item()
all_pos_total += (train_batch[(- 1)] != 0).sum().item()
predictions += preds.cpu().squeeze().tolist()
true_labels += train_batch[(- 1)].cpu().squeeze().tolist()
acc = (all_right / all_total)
pos_acc = (all_pos_right / all_pos_total)
print('Epoch {} finished, total loss={}, distant acc={}, pos acc={}'.format((epoch + 1), total_loss, acc, pos_acc))
predictions_ = []
true_labels_ = []
for (pred, true) in zip(predictions, true_labels):
predictions_.append(self.data_test.id2rel(pred))
true_labels_.append(self.data_test.id2rel(true))
(train_p, train_r, train_f1) = scorer.score(true_labels_, predictions_, False, 'NA')
print('Training set P={}, R={}, F1={} on real labels'.format(train_p, train_r, train_f1))
(dev_acc, dev_f1) = self.test_batch('dev')
if test:
(test_acc, test_f1) = self.test_batch('test')
if (metric == 'train'):
if (train_f1 > best_metric):
best_metric = train_f1
torch.save(self.SENTmodel.state_dict(), self.save_path)
print('Saving model to {}...'.format(self.save_path))
elif (dev_f1 > best_metric):
best_metric = dev_f1
torch.save(self.SENTmodel.state_dict(), self.save_path)
print('Saving model to {}...'.format(self.save_path))
def filter_relabel(self, prob_threshold=0.0, cutrate=(- 1), relabel_rate=(- 1), convergence_rate=0.99):
print('Filtering with label rank...')
print('Loading model from {}'.format(self.save_path))
self.SENTmodel.load_state_dict(torch.load(self.save_path))
self.SENTmodel.eval()
train_batch_data = self.train_dl.dataset
for i in range(len(train_batch_data)):
train_batch_data[i][(- 1)][:] = self.ori_train_labels[i]
self.train_dl = DataLoader(train_batch_data, batch_size=self.batch_size, sampler=SequentialSampler(train_batch_data))
all_probs = []
max_probs = []
all_preds = []
for (i, train_batch) in tqdm(enumerate(self.train_dl)):
train_batch = [t.cuda() for t in train_batch]
with torch.no_grad():
(loss, preds, right, total, r_right, probs, label_probs) = self.SENTmodel(train_batch, mode='test')
all_probs += probs.view((- 1)).cpu().numpy().tolist()
max_probs += label_probs.max((- 1))[0].view((- 1)).cpu().numpy().tolist()
all_preds += preds.view((- 1)).cpu().numpy().tolist()
filtered_dataset = self.train_dl.dataset
noisy_data_index = []
filtered_index = []
prob_dict = defaultdict(list)
for (index, (data, prob)) in enumerate(zip(self.train_dl.dataset, all_probs)):
prob_dict[data[(- 1)].item()].append([index, prob])
relabel_cnt = defaultdict(int)
prob_dict = {label: sorted(probs, key=(lambda x: x[1])) for (label, probs) in prob_dict.items()}
for (label, sorted_probs) in prob_dict.items():
th = 0
prob = 0
if ((cutrate > 0) and (sorted_probs[(- 1)][1] < cutrate)):
pass
else:
prob = (((2 * prob_threshold) * sorted_probs[(- 1)][1]) if (sorted_probs[(- 1)][1] > convergence_rate) else (prob_threshold * sorted_probs[(- 1)][1]))
for (i, (index, p)) in enumerate(sorted_probs):
if (p < prob):
noisy_data_index.append(index)
th += 1
if (relabel_rate > 0):
if (max_probs[index] > relabel_rate):
filtered_dataset[index][(- 1)][:] = all_preds[index]
relabel_cnt[all_preds[index]] += 1
else:
filtered_dataset[index][(- 1)][:] = 0
relabel_cnt[0] += 1
else:
filtered_dataset[index][(- 1)][:] = 0
else:
filtered_index.append(index)
print('Filtering {}/{} instances with label {}, threshold prob={}, max prob={}'.format(th, len(prob_dict[label]), self.id2label[label], prob, sorted_probs[(- 1)][1]))
print('-----------Relabel detail------------')
for (key, value) in relabel_cnt.items():
print('Relabel {} for label {}, Th={}'.format(value, self.id2label[key], prob_dict[key][(- 1)][1]))
pred_noise_num = float(len(noisy_data_index))
print('Deleting {} noisy instances with threshold={}'.format(pred_noise_num, prob_threshold))
self.train_dl = DataLoader(filtered_dataset, batch_size=self.batch_size, sampler=RandomSampler(filtered_dataset))
def test_denoise(self, prob_threshold=0.0, cutrate=(- 1), convergence_rate=0.99, retain=True):
print('Test denoise ability')
print('Loading model from {}'.format(self.save_path))
self.SENTmodel.load_state_dict(torch.load(self.save_path))
self.SENTmodel.eval()
if retain:
ori_noise = copy.deepcopy(self.pred_noise)
all_probs = []
max_probs = []
all_preds = []
for (i, train_batch) in tqdm(enumerate(self.test_noise_dl)):
(train_batch, is_noise) = (train_batch[:(- 1)], train_batch[(- 1)])
train_batch = [t.cuda() for t in train_batch]
with torch.no_grad():
(loss, preds, right, total, r_right, probs, label_probs) = self.SENTmodel(train_batch, mode='test')
all_probs += probs.view((- 1)).cpu().numpy().tolist()
max_probs += label_probs.max((- 1))[0].view((- 1)).cpu().numpy().tolist()
all_preds += preds.view((- 1)).cpu().numpy().tolist()
noisy_data_index = []
filtered_index = []
prob_dict = defaultdict(list)
for (index, (data, prob)) in enumerate(zip(self.test_noise_dl.dataset, all_probs)):
prob_dict[data[(- 2)].item()].append([index, prob])
prob_dict = {label: sorted(probs, key=(lambda x: x[1])) for (label, probs) in prob_dict.items()}
for (label, sorted_probs) in prob_dict.items():
th = 0
right = 0.0
gold = 0.0
pred = 0.0
prob = 0.0
if ((cutrate > 0) and (sorted_probs[(- 1)][1] < cutrate)):
for (index, p) in sorted_probs:
gold += self.test_noise_dl.dataset[index][(- 1)]
else:
prob = (((2 * prob_threshold) * sorted_probs[(- 1)][1]) if (sorted_probs[(- 1)][1] > convergence_rate) else (prob_threshold * sorted_probs[(- 1)][1]))
for (i, (index, p)) in enumerate(sorted_probs):
gold += self.test_noise_dl.dataset[index][(- 1)]
if (p < prob):
noisy_data_index.append(index)
th += 1
right += self.test_noise_dl.dataset[index][(- 1)]
pred += 1
if (self.test_noise_dl.dataset[index][(- 2)] != 0):
self.pred_noise[index] = 1
else:
filtered_index.append(index)
if (label == 0):
right = 0.0
pred = 0.0
gold = 0.0
if (pred == 0):
p = 0.0
else:
p = (float(right) / float(pred))
if (gold == 0):
r = 0.0
else:
r = (float(right) / float(gold))
print('Filtering {}/{} instances with label {}, threshold prob={}, max prob={}, p={}, r={}'.format(th, len(prob_dict[label]), self.id2label[label], prob, sorted_probs[(- 1)][1], p, r))
real_noise_num = 0.0
right_noise_num = 0.0
pred_noise_num = 0.0
for idx in range(len(self.test_noise_dl.dataset)):
if self.test_noise_dl.dataset[idx][(- 1)]:
real_noise_num += 1
if (self.pred_noise[idx] == 1):
right_noise_num += 1
if (self.pred_noise[idx] == 1):
pred_noise_num += 1
if (pred_noise_num == 0):
noise_p = 0.0
else:
noise_p = (right_noise_num / pred_noise_num)
if (real_noise_num == 0):
noise_r = 0.0
else:
noise_r = (right_noise_num / real_noise_num)
print('Deleting {} noisy instances with threshold={}, p={}, r={}'.format(pred_noise_num, prob_threshold, noise_p, noise_r))
if retain:
self.pred_noise = ori_noise
def test_batch(self, data='test', detail=False):
if (data == 'test'):
print('-----testing phase-----')
dl = self.test_dl
elif (data == 'dev'):
print('------dev phase------')
dl = self.dev_dl
predictions = []
true_labels = []
all_right = 0.0
all_total = 0.0
all_pos_right = 0.0
all_pos_total = 0.0
all_probs = []
for (i, test_batch) in tqdm(enumerate(dl)):
self.SENTmodel.eval()
test_batch = [t.cuda() for t in test_batch]
with torch.no_grad():
(loss, preds, right, total, pos_right, probs, _) = self.SENTmodel(test_batch, mode='test')
predictions += preds.cpu().squeeze().tolist()
true_labels += test_batch[(- 1)].cpu().squeeze().tolist()
all_right += right.sum().item()
all_total += total.sum().item()
all_pos_right += pos_right.sum().item()
all_pos_total += (test_batch[(- 1)] != 0).sum().item()
all_probs += probs.cpu().numpy().tolist()
acc = (all_right / all_total)
pos_acc = (all_pos_right / all_pos_total)
print('Finish evaluating on {} set, distant acc={}, pos acc={} '.format(data, acc, pos_acc))
predictions = [self.data_test.id2rel(i) for i in predictions]
true_labels = [self.data_test.id2rel(i) for i in true_labels]
(test_p, test_r, test_f1) = scorer.score(true_labels, predictions, detail, 'NA')
print('P={}, R={}, F1={}'.format(test_p, test_r, test_f1))
return (pos_acc, test_f1)
def load_model(self, load_path):
if os.path.exists(load_path):
self.SENTmodel.load_state_dict(torch.load(load_path))
print('Loading model from {}...'.format(load_path))
elif os.path.exists(self.save_path):
self.SENTmodel.load_state_dict(torch.load(self.save_path))
print('Loading model from {}...'.format(self.save_path))
else:
print('Failed to load model from {}'.format(load_path))
return
|
def load_data(args, mode='train'):
data_path = (((args.save_data_path + '.') + mode) + '.data')
if os.path.exists(data_path):
print('Loading {} data from {}...'.format(mode, data_path))
with open(data_path, 'rb') as f:
data = pickle.load(f)
else:
data = Data(args, mode)
print('Saving {} data to {}...'.format(mode, data_path))
with open(data_path, 'wb') as f:
pickle.dump(data, f)
return data
|
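load_data caches one preprocessed Data object per split under `<save_data_path>.<mode>.data`. The same cache-or-build pattern, isolated into a self-contained helper (the builder callable stands in for the repository's Data class):

import os
import pickle

def cached(path, build_fn):
    # Return the pickled object at `path` if it exists; otherwise build it,
    # persist it, and return it.
    if os.path.exists(path):
        with open(path, 'rb') as f:
            return pickle.load(f)
    obj = build_fn()
    with open(path, 'wb') as f:
        pickle.dump(obj, f)
    return obj

# e.g. data_train = cached(args.save_data_path + '.train.data', lambda: Data(args, 'train'))
|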
def train_SN(args):
(data_train, data_dev, data_test) = (load_data(args, 'train'), load_data(args, 'dev'), load_data(args, 'test'))
if args.noise_label:
data_noise = load_data(args, 'test_noise')
else:
data_noise = None
args.label_size = len(data_test.relId2labelId)
args.ner_label_size = len(data_test.ner2id)
trainer = Trainer(args, data_train, data_dev, data_test, data_noise)
if (args.mode == 'train'):
trainer.train_batch()
trainer.test_batch()
elif (args.mode == 'test'):
try:
trainer.load_model(args.load_model_name)
print('Loading model from {}'.format(args.load_model_name))
except Exception:
trainer.load_model(args.save_model_name)
print('Loading model from {}'.format(args.save_model_name))
eval_result = trainer.test_batch(data='dev')
eval_result = trainer.test_batch()
|
class Trainer(nn.Module):
def __init__(self, options, data_train, data_dev, data_test, data_noise):
super(Trainer, self).__init__()
self.options = options
self.batch_size = options.batch_size
self.save_path = options.save_model_name
self.device = options.gpu
self.n_device = options.n_gpu
self.test_noise = options.noise_label
self.n_initial_epoch = 50
self.n_iter_epoch = 50
self.n_iter_num = 4
self.n_posi_epoch = 50
self.options.random = False
self.dataset = options.dataset
if (data_train is not None):
self.data_train = data_train
self.id2label = data_train.labelId2rel
train_batch_data = data_train.batchify()
self.ori_train_labels = [d[(- 1)].item() for d in train_batch_data]
self.train_dl = DataLoader(train_batch_data, sampler=RandomSampler(train_batch_data), batch_size=options.batch_size)
if self.test_noise:
self.data_noise = data_noise
noise_batch_data = data_noise.batchify(noise_label=True)
for idx in range(len(noise_batch_data)):
if ((noise_batch_data[idx][(- 2)] == 0) and noise_batch_data[idx][(- 1)]):
noise_batch_data[idx][(- 1)] = False
self.test_noise_dl = DataLoader(noise_batch_data, sampler=SequentialSampler(noise_batch_data), batch_size=options.batch_size)
self.pred_noise = ([0] * len(noise_batch_data))
self.data_dev = data_dev
self.data_test = data_test
self.eval_interval = 10
test_batch_data = data_test.batchify()
dev_batch_data = data_dev.batchify()
self.test_dl = DataLoader(test_batch_data, sampler=SequentialSampler(test_batch_data), batch_size=options.batch_size)
self.dev_dl = DataLoader(dev_batch_data, sampler=SequentialSampler(dev_batch_data), batch_size=options.batch_size)
self.initialize()
def initialize(self):
print('Initializing model...')
self.options.vocab_size = self.data_test.get_vocab_size()
self.SENTmodel = SENT_Model(self.options, vocab_file=self.options.word2vec_file)
if (self.n_device > 1):
self.SENTmodel = torch.nn.DataParallel(self.SENTmodel)
if self.device:
self.SENTmodel = self.SENTmodel.to(self.device)
weight_decay = self.options.weight_decay
self.para_list = [p for p in self.SENTmodel.parameters() if p.requires_grad]
if (self.options.optim == 'sgd'):
self.optimizer = torch.optim.SGD(self.SENTmodel.parameters(), lr=self.options.lr)
elif (self.options.optim == 'adam'):
self.optimizer = torch.optim.Adam(self.para_list, lr=self.options.lr, weight_decay=weight_decay)
def train_batch(self):
self.options.random = False
print('------------ Start Initial Training -------------')
self.save_path = (self.save_path + '-N0')
self.train_epoch(self.n_initial_epoch, negloss=True, metric='train', test=True)
print('------------- Start Iterative Training -------------')
for iter in range(self.n_iter_num):
print('Iterative Training Phase {}'.format((iter + 1)))
self.filter_relabel_tacred(prob_threshold=0.15, cutrate=0.01, relabel_rate=0.8)
self.initialize()
self.save_path = ('-'.join(self.save_path.split('-')[:(- 1)]) + f'-N{(iter + 1)}')
self.train_epoch(self.n_iter_epoch, negloss=True, metric='dev', test=True)
print('------------- Start Positive Training -------------')
self.batch_size = 50
self.filter_relabel_tacred(0.15, cutrate=0.0, relabel_rate=0.8)
self.initialize()
print('Start Positive Training')
self.save_path = ('-'.join(self.save_path.split('-')[:(- 1)]) + '-P')
self.train_epoch(self.n_posi_epoch, negloss=False, metric='dev', test=True)
def train_epoch(self, epoch_num, negloss=False, test=False, metric='dev'):
best_metric = (- 1)
for epoch in range(epoch_num):
total_loss = 0.0
all_right = 0.0
all_total = 0.0
all_pos_right = 0.0
all_pos_total = 0.0
idx = 0
predictions = []
true_labels = []
true_D_labels = []
print('------epoch {}/{}------'.format((epoch + 1), epoch_num))
for (i, train_batch) in tqdm(enumerate(self.train_dl)):
self.SENTmodel.train()
idx += train_batch[0].size(0)
train_batch = [t.to(self.device) for t in train_batch]
(loss, preds, right, total, pos_right, probs, _) = self.SENTmodel(train_batch, negloss=negloss)
loss = loss.mean()
loss.backward()
if (self.options.optim == 'sgd'):
torch.nn.utils.clip_grad_norm_(self.SENTmodel.parameters(), 5.0)
self.optimizer.step()
self.optimizer.zero_grad()
total_loss += loss.item()
all_right += right.sum().item()
all_total += total.sum().item()
all_pos_right += pos_right.sum().item()
all_pos_total += (train_batch[(- 1)] != 0).sum().item()
predictions += preds.cpu().squeeze().tolist()
true_labels += train_batch[(- 2)].cpu().squeeze().tolist()
true_D_labels += train_batch[(- 1)].cpu().squeeze().tolist()
acc = (all_right / all_total)
pos_acc = (all_pos_right / all_pos_total)
print('Epoch {} finished, total loss={}, distant acc={}, pos acc={}'.format((epoch + 1), total_loss, acc, pos_acc))
predictions_ = []
true_labels_ = []
true_D_labels_ = []
for (pred, true, trued) in zip(predictions, true_labels, true_D_labels):
predictions_.append(self.data_test.id2rel(pred))
true_labels_.append(self.data_test.id2rel(true))
true_D_labels_.append(self.data_test.id2rel(trued))
(train_p, train_r, train_f1) = scorer.score(true_labels_, predictions_, False, 'no_relation')
print('Training set P={}, R={}, F1={} on real labels'.format(train_p, train_r, train_f1))
(train_p, train_r, train_f1) = scorer.score(true_D_labels_, predictions_, False, 'no_relation')
print('Training set P={}, R={}, F1={} on noisy labels'.format(train_p, train_r, train_f1))
(dev_acc, dev_f1) = self.test_batch('dev')
if test:
(test_acc, test_f1) = self.test_batch('test')
if (metric == 'train'):
if (train_f1 > best_metric):
best_metric = train_f1
torch.save(self.SENTmodel.state_dict(), self.save_path)
print('Saving model to {}...'.format(self.save_path))
elif (dev_f1 > best_metric):
best_metric = dev_f1
torch.save(self.SENTmodel.state_dict(), self.save_path)
print('Saving model to {}...'.format(self.save_path))
def filter_relabel(self, prob_threshold=0.0, cutrate=(- 1), relabel_rate=(- 1), convergence_rate=0.99):
print('Filtering with label rank...')
print('Loading model from {}'.format(self.save_path))
self.SENTmodel.load_state_dict(torch.load(self.save_path))
self.SENTmodel.eval()
train_batch_data = self.train_dl.dataset
for i in range(len(train_batch_data)):
train_batch_data[i][(- 1)][:] = self.ori_train_labels[i]
self.train_dl = DataLoader(train_batch_data, batch_size=self.batch_size, sampler=SequentialSampler(train_batch_data))
all_probs = []
max_probs = []
all_preds = []
for (i, train_batch) in tqdm(enumerate(self.train_dl)):
train_batch = [t.cuda() for t in train_batch]
with torch.no_grad():
(loss, preds, right, total, r_right, probs, label_probs) = self.SENTmodel(train_batch, mode='test')
all_probs += probs.view((- 1)).cpu().numpy().tolist()
max_probs += label_probs.max((- 1))[0].view((- 1)).cpu().numpy().tolist()
all_preds += preds.view((- 1)).cpu().numpy().tolist()
filtered_dataset = self.train_dl.dataset
noisy_data_index = []
filtered_index = []
prob_dict = defaultdict(list)
for (index, (data, prob)) in enumerate(zip(self.train_dl.dataset, all_probs)):
prob_dict[data[(- 1)].item()].append([index, prob])
relabel_cnt = defaultdict(int)
prob_dict = {label: sorted(probs, key=(lambda x: x[1])) for (label, probs) in prob_dict.items()}
for (label, sorted_probs) in prob_dict.items():
th = 0
prob = 0
if ((cutrate > 0) and (sorted_probs[(- 1)][1] < cutrate)):
pass
else:
prob = (((2 * prob_threshold) * sorted_probs[(- 1)][1]) if (sorted_probs[(- 1)][1] > convergence_rate) else (prob_threshold * sorted_probs[(- 1)][1]))
for (i, (index, p)) in enumerate(sorted_probs):
if (p < prob):
noisy_data_index.append(index)
th += 1
if (relabel_rate > 0):
if (max_probs[index] > relabel_rate):
filtered_dataset[index][(- 1)][:] = all_preds[index]
relabel_cnt[all_preds[index]] += 1
else:
filtered_dataset[index][(- 1)][:] = 0
relabel_cnt[0] += 1
else:
filtered_dataset[index][(- 1)][:] = 0
else:
filtered_index.append(index)
print('Filtering {}/{} instances with label {}, threshold prob={}, max prob={}'.format(th, len(prob_dict[label]), self.id2label[label], prob, sorted_probs[(- 1)][1]))
print('-----------Relabel detail------------')
for (key, value) in relabel_cnt.items():
print('Relabel {} for label {}, Th={}'.format(value, self.id2label[key], prob_dict[key][(- 1)][1]))
pred_noise_num = float(len(noisy_data_index))
print('Deleting {} noisy instances with threshold={}'.format(pred_noise_num, prob_threshold))
self.train_dl = DataLoader(filtered_dataset, batch_size=self.batch_size, sampler=RandomSampler(filtered_dataset))
def test_denoise(self, prob_threshold=0.0, cutrate=(- 1), convergence_rate=0.99, retain=True):
print('Test denoise ability')
print('Loading model from {}'.format(self.save_path))
self.SENTmodel.load_state_dict(torch.load(self.save_path))
self.SENTmodel.eval()
if retain:
ori_noise = copy.deepcopy(self.pred_noise)
all_probs = []
max_probs = []
all_preds = []
for (i, train_batch) in tqdm(enumerate(self.test_noise_dl)):
(train_batch, is_noise) = (train_batch[:(- 1)], train_batch[(- 1)])
train_batch = [t.cuda() for t in train_batch]
with torch.no_grad():
(loss, preds, right, total, r_right, probs, label_probs) = self.SENTmodel(train_batch, mode='test')
all_probs += probs.view((- 1)).cpu().numpy().tolist()
max_probs += label_probs.max((- 1))[0].view((- 1)).cpu().numpy().tolist()
all_preds += preds.view((- 1)).cpu().numpy().tolist()
noisy_data_index = []
filtered_index = []
prob_dict = defaultdict(list)
for (index, (data, prob)) in enumerate(zip(self.test_noise_dl.dataset, all_probs)):
prob_dict[data[(- 2)].item()].append([index, prob])
prob_dict = {label: sorted(probs, key=(lambda x: x[1])) for (label, probs) in prob_dict.items()}
for (label, sorted_probs) in prob_dict.items():
th = 0
right = 0.0
gold = 0.0
pred = 0.0
prob = 0.0
if ((cutrate > 0) and (sorted_probs[(- 1)][1] < cutrate)):
for (index, p) in sorted_probs:
gold += self.test_noise_dl.dataset[index][(- 1)]
else:
prob = (((2 * prob_threshold) * sorted_probs[(- 1)][1]) if (sorted_probs[(- 1)][1] > convergence_rate) else (prob_threshold * sorted_probs[(- 1)][1]))
for (i, (index, p)) in enumerate(sorted_probs):
gold += self.test_noise_dl.dataset[index][(- 1)]
if (p < prob):
noisy_data_index.append(index)
th += 1
right += self.test_noise_dl.dataset[index][(- 1)]
pred += 1
if (self.test_noise_dl.dataset[index][(- 2)] != 0):
self.pred_noise[index] = 1
else:
filtered_index.append(index)
if (label == 0):
right = 0.0
pred = 0.0
gold = 0.0
if (pred == 0):
p = 0.0
else:
p = (float(right) / float(pred))
if (gold == 0):
r = 0.0
else:
r = (float(right) / float(gold))
print('Filtering {}/{} instances with label {}, threshold prob={}, max prob={}, p={}, r={}'.format(th, len(prob_dict[label]), self.id2label[label], prob, sorted_probs[(- 1)][1], p, r))
real_noise_num = 0.0
right_noise_num = 0.0
pred_noise_num = 0.0
for idx in range(len(self.test_noise_dl.dataset)):
if self.test_noise_dl.dataset[idx][(- 1)]:
real_noise_num += 1
if (self.pred_noise[idx] == 1):
right_noise_num += 1
if (self.pred_noise[idx] == 1):
pred_noise_num += 1
if (pred_noise_num == 0):
noise_p = 0.0
else:
noise_p = (right_noise_num / pred_noise_num)
if (real_noise_num == 0):
noise_r = 0.0
else:
noise_r = (right_noise_num / real_noise_num)
print('Deleting {} noisy instances with threshold={}, p={}, r={}'.format(pred_noise_num, prob_threshold, noise_p, noise_r))
if retain:
self.pred_noise = ori_noise
def filter_relabel_tacred(self, prob_threshold=0.0, cutrate=(- 1), relabel_rate=(- 1), convergence_rate=0.99):
print('Filtering with label rank...')
print('Loading model from {}'.format(self.save_path))
self.SENTmodel.load_state_dict(torch.load(self.save_path))
self.SENTmodel.eval()
train_batch_data = self.train_dl.dataset
for i in range(len(train_batch_data)):
train_batch_data[i][(- 1)][:] = self.ori_train_labels[i]
self.train_dl = DataLoader(train_batch_data, batch_size=self.batch_size, sampler=SequentialSampler(train_batch_data))
all_probs = []
max_probs = []
all_preds = []
for (i, train_batch) in tqdm(enumerate(self.train_dl)):
train_batch = [t.cuda() for t in train_batch]
with torch.no_grad():
(loss, preds, right, total, r_right, probs, label_probs) = self.SENTmodel(train_batch, mode='test')
all_probs += probs.view((- 1)).cpu().numpy().tolist()
max_probs += label_probs.max((- 1))[0].view((- 1)).cpu().numpy().tolist()
all_preds += preds.view((- 1)).cpu().numpy().tolist()
filtered_dataset = self.train_dl.dataset
noisy_data_index = []
filtered_index = []
prob_dict = defaultdict(list)
for (index, (data, prob)) in enumerate(zip(self.train_dl.dataset, all_probs)):
prob_dict[data[(- 1)].item()].append([index, prob])
relabel_cnt = defaultdict(int)
relabel_right = defaultdict(int)
relabel_right[0] = 0
prob_dict = {label: sorted(probs, key=(lambda x: x[1])) for (label, probs) in prob_dict.items()}
label_num = [len(prob_dict[i]) for i in range(len(prob_dict))]
right_num = 0.0
noise_num = 0.0
pred_num = 0.0
relabel_prob_dict = defaultdict(list)
for (label, sorted_probs) in prob_dict.items():
th = 0
pred = 0.0
right = 0.0
gold = 0.0
prob = 0.0
if ((cutrate > 0) and (sorted_probs[(- 1)][1] < cutrate)):
for (index, p) in sorted_probs:
if (self.train_dl.dataset[index][(- 1)] != self.train_dl.dataset[index][(- 2)]):
gold += 1
else:
prob = (((2 * prob_threshold) * sorted_probs[(- 1)][1]) if (sorted_probs[(- 1)][1] > convergence_rate) else (prob_threshold * sorted_probs[(- 1)][1]))
for (i, (index, p)) in enumerate(sorted_probs):
if (self.train_dl.dataset[index][(- 1)] != self.train_dl.dataset[index][(- 2)]):
gold += 1
if (p < prob):
noisy_data_index.append(index)
relabel_prob_dict[all_preds[index]].append([index, max_probs[index]])
th += 1
pred += 1
if (self.train_dl.dataset[index][(- 1)] != self.train_dl.dataset[index][(- 2)]):
right += 1
if (relabel_rate > 0):
if (max_probs[index] > relabel_rate):
filtered_dataset[index][(- 1)][:] = all_preds[index]
relabel_cnt[all_preds[index]] += 1
if (filtered_dataset[index][(- 2)].item() == all_preds[index]):
relabel_right[all_preds[index]] += 1
else:
filtered_dataset[index][(- 1)][:] = 0
relabel_cnt[0] += 1
else:
filtered_dataset[index][(- 1)][:] = 0
relabel_cnt[0] += 1
if (filtered_dataset[index][(- 2)].item() == 0):
relabel_right[0] += 1
else:
filtered_index.append(index)
pred_num += pred
noise_num += gold
right_num += right
if (pred == 0):
p = 0.0
else:
p = (float(right) / float(pred))
if (gold == 0):
r = 0.0
else:
r = (float(right) / float(gold))
print('Filtering {}/{} instances with label {}, threshold prob={}, max prob={}, p={}, r={}'.format(th, len(prob_dict[label]), self.id2label[label], prob, sorted_probs[(- 1)][1], p, r))
if (pred_num == 0):
noise_p = 0.0
else:
noise_p = (right_num / pred_num)
if (noise_num == 0):
noise_r = 0.0
else:
noise_r = (right_num / noise_num)
print('Deleting {} noisy instances with threshold={}, p={}, r={}'.format(pred_num, prob_threshold, noise_p, noise_r))
print('-----------Relabel detail------------')
all_relabel_right = 0.0
all_relabel = 0.0
for (key, value) in relabel_cnt.items():
right_value = relabel_right[key]
if (key != 0):
all_relabel += value
all_relabel_right += right_value
acc = (float(right_value) / float(value))
if (acc > 1):
print(right_value, value)
print('Relabel {} for label {}, Th={}, acc={}'.format(value, self.id2label[key], prob_dict[key][(- 1)][1], acc))
if (all_relabel != 0):
print('Relabel {} ins, acc={}'.format(all_relabel, (all_relabel_right / all_relabel)))
pred_noise_num = float(len(noisy_data_index))
print('Deleting {} noisy instances with threshold={}'.format(pred_noise_num, prob_threshold))
self.train_dl = DataLoader(filtered_dataset, batch_size=self.batch_size, sampler=RandomSampler(filtered_dataset))
def test_batch(self, data='test', detail=False):
if (data == 'test'):
print('-----testing phase-----')
dl = self.test_dl
elif (data == 'dev'):
print('------dev phase------')
dl = self.dev_dl
predictions = []
true_labels = []
all_right = 0.0
all_total = 0.0
all_pos_right = 0.0
all_pos_total = 0.0
all_probs = []
for (i, test_batch) in tqdm(enumerate(dl)):
self.SENTmodel.eval()
test_batch = [t.cuda() for t in test_batch]
with torch.no_grad():
(loss, preds, right, total, pos_right, probs, _) = self.SENTmodel(test_batch, mode='test')
predictions += preds.cpu().squeeze().tolist()
if (self.dataset == 'tacred'):
true_labels += test_batch[(- 2)].cpu().squeeze().tolist()
else:
true_labels += test_batch[(- 1)].cpu().squeeze().tolist()
all_right += right.sum().item()
all_total += total.sum().item()
all_pos_right += pos_right.sum().item()
all_pos_total += (test_batch[(- 1)] != 0).sum().item()
all_probs += probs.cpu().numpy().tolist()
acc = (all_right / all_total)
pos_acc = (all_pos_right / all_pos_total)
print('Finish evaluating on {} set, distant acc={}, pos acc={} '.format(data, acc, pos_acc))
predictions = [self.data_test.id2rel(i) for i in predictions]
true_labels = [self.data_test.id2rel(i) for i in true_labels]
if (self.dataset == 'tacred'):
(test_p, test_r, test_f1) = scorer.score(true_labels, predictions, detail, 'no_relation')
else:
(test_p, test_r, test_f1) = scorer.score(true_labels, predictions, detail, 'NA')
print('P={}, R={}, F1={}'.format(test_p, test_r, test_f1))
return (pos_acc, test_f1)
def load_model(self, load_path):
if os.path.exists(load_path):
self.SENTmodel.load_state_dict(torch.load(load_path))
print('Loading model from {}...'.format(load_path))
elif os.path.exists(self.save_path):
self.SENTmodel.load_state_dict(torch.load(self.save_path))
print('Loading model from {}...'.format(self.save_path))
else:
print('Failed to load model from {}'.format(load_path))
return
|
def extract_features(strv):
feat = np.array(([0.0] * 6))
for i in range(6):
tmp = strv[i].split(':')
feat_index = int(tmp[0])
feat_value = float(tmp[1])
feat[(feat_index - 1)] = feat_value
return feat
|
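Worked example for extract_features: the input is six "index:value" tokens with 1-based indices, which populate a dense 6-dimensional vector (the fixed length of 6 is taken from the loop bound above):

tokens = ['1:0.5', '2:0.1', '3:0.0', '4:0.9', '5:0.2', '6:1.0']
feat = extract_features(tokens)
print(feat)  # [0.5 0.1 0.  0.9 0.2 1. ]
|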
def parse_line(line):
line = line.strip()
line = line.split('|')
decision_info = line[0].split(' ')
timestamp = int(decision_info[0])
offered_ad_id = int(decision_info[1])
click = int(decision_info[2])
user_info = line[1].split(' ')[1:]
user_feat = extract_features(user_info)
ad_info = line[2].split(' ')
eligible_ads_ids = np.array([int(ad_info[0])])
eligible_ads_feat = np.array([extract_features(ad_info[1:])])
for i in range(3, len(line)):
ad_info = line[i].split(' ')
if (len(ad_info[1:]) >= 6):
eligible_ads_ids = np.append(eligible_ads_ids, np.array([int(ad_info[0])]))
eligible_ads_feat = np.append(eligible_ads_feat, np.array([extract_features(ad_info[1:])]), axis=0)
sorted_inds = np.argsort(eligible_ads_ids)
eligible_ads_ids = eligible_ads_ids[sorted_inds]
eligible_ads_feat = eligible_ads_feat[sorted_inds]
return (timestamp, offered_ad_id, click, user_feat, eligible_ads_ids, eligible_ads_feat)
|
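A synthetic line in the pipe-delimited click-log format parse_line expects: a "timestamp ad_id click" header, a user block, then one block per eligible ad, each carrying six "index:value" features. The exact layout is inferred from the parsing code above, and the ids and values below are made up:

line = ('1317513291 564335 0 '
        '|user 1:0.1 2:0.2 3:0.3 4:0.4 5:0.5 6:1.0 '
        '|564335 1:0.3 2:0.1 3:0.1 4:0.2 5:0.3 6:1.0 '
        '|565822 1:0.2 2:0.2 3:0.2 4:0.2 5:0.2 6:1.0')
ts, offered, click, user_feat, ad_ids, ad_feats = parse_line(line)
print(ts, offered, click)  # 1317513291 564335 0
print(ad_ids)              # [564335 565822], sorted ascending
print(ad_feats.shape)      # (2, 6)
|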
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif (x.find('rc') != (- 1)):
patch_version = x.split('rc')
digit_version.append((int(patch_version[0]) - 1))
digit_version.append(int(patch_version[1]))
return digit_version
|
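digit_version turns a version string into a lexicographically comparable list of ints; an 'rc' component decrements the number it is attached to, so release candidates sort before the corresponding final release:

print(digit_version('1.3.0'))   # [1, 3, 0]
print(digit_version('1.3rc1'))  # [1, 2, 1]
print(digit_version('1.3rc1') < digit_version('1.3.0'))  # True
|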
def init_segmentor(config, checkpoint=None, device='cuda:0'):
'Initialize a segmentor from config file.\n\n Args:\n config (str or :obj:`mmcv.Config`): Config file path or the config\n object.\n checkpoint (str, optional): Checkpoint path. If left as None, the model\n will not load any weights.\n\n Returns:\n nn.Module: The constructed segmentor.\n '
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif (not isinstance(config, mmcv.Config)):
raise TypeError('config must be a filename or Config object, but got {}'.format(type(config)))
config.model.pretrained = None
model = build_segmentor(config.model, test_cfg=config.test_cfg)
if (checkpoint is not None):
checkpoint = load_checkpoint(model, checkpoint)
model.CLASSES = checkpoint['meta']['CLASSES']
model.PALETTE = checkpoint['meta']['PALETTE']
model.cfg = config
model.to(device)
model.eval()
return model
|
class LoadImage():
'A simple pipeline to load image.'
def __call__(self, results):
'Call function to load images into results.\n\n Args:\n results (dict): A result dict contains the file name\n of the image to be read.\n\n Returns:\n dict: ``results`` will be returned containing loaded image.\n '
if isinstance(results['img'], str):
results['filename'] = results['img']
results['ori_filename'] = results['img']
else:
results['filename'] = None
results['ori_filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
|
def inference_segmentor(model, img):
'Inference image(s) with the segmentor.\n\n Args:\n model (nn.Module): The loaded segmentor.\n imgs (str/ndarray or list[str/ndarray]): Either image files or loaded\n images.\n\n Returns:\n (list[Tensor]): The segmentation result.\n '
cfg = model.cfg
device = next(model.parameters()).device
test_pipeline = ([LoadImage()] + cfg.data.test.pipeline[1:])
test_pipeline = Compose(test_pipeline)
data = dict(img=img)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
data = scatter(data, [device])[0]
else:
data['img_metas'] = data['img_metas'][0].data
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
|
def show_result_pyplot(model, img, result, palette=None, fig_size=(15, 10)):
'Visualize the segmentation results on the image.\n\n Args:\n model (nn.Module): The loaded segmentor.\n img (str or np.ndarray): Image filename or loaded image.\n result (list): The segmentation result.\n palette (list[list[int]]] | None): The palette of segmentation\n map. If None is given, random palette will be generated.\n Default: None\n fig_size (tuple): Figure size of the pyplot figure.\n '
if hasattr(model, 'module'):
model = model.module
img = model.show_result(img, result, palette=palette, show=False)
plt.figure(figsize=fig_size)
plt.imshow(mmcv.bgr2rgb(img))
plt.show()
|
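The three helpers above chain into the usual single-image demo flow; the config, checkpoint, and image paths below are placeholders, and cityscapes_palette is defined further down in this file:

config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
checkpoint_file = 'checkpoints/pspnet_r50_cityscapes.pth'

model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
result = inference_segmentor(model, 'demo/demo.png')
show_result_pyplot(model, 'demo/demo.png', result, palette=cityscapes_palette())
|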
def set_random_seed(seed, deterministic=False):
'Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n '
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
|
def train_segmentor(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
'Launch segmentor training.'
logger = get_root_logger(cfg.log_level)
dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
data_loaders = [build_dataloader(ds, cfg.data.samples_per_gpu, cfg.data.workers_per_gpu, len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, drop_last=True) for ds in dataset]
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
optimizer = build_optimizer(model, cfg.optimizer)
runner = IterBasedRunner(model=model, batch_processor=None, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta)
runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None))
runner.timestamp = timestamp
if validate:
test_dataset = build_dataset(cfg.data.test, dict(test_mode=True))
test_dataloader = build_dataloader(test_dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
eval_cfg = cfg.get('test_evaluation', {})
eval_hook = (DistEvalHook if distributed else EvalHook)
runner.register_hook(eval_hook(test_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_iters)
|
def cityscapes_classes():
'Cityscapes class names for external use.'
return ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']
|
def ade_classes():
'ADE20K class names for external use.'
return ['wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag']
|
def voc_classes():
'Pascal VOC class names for external use.'
return ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
|
def cityscapes_palette():
'Cityscapes palette for external use.'
return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]]
|
def ade_palette():
'ADE20K palette for external use.'
return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], [102, 255, 0], [92, 0, 255]]
|
def voc_palette():
'Pascal VOC palette for external use.'
return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
|
def get_classes(dataset):
'Get class names of a dataset.'
alias2name = {}
for (name, aliases) in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if (dataset in alias2name):
labels = eval((alias2name[dataset] + '_classes()'))
else:
raise ValueError(f'Unrecognized dataset: {dataset}')
else:
raise TypeError(f'dataset must be a str, but got {type(dataset)}')
return labels
|
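Assuming dataset_aliases maps aliases such as 'voc' to the canonical name 'voc' (as in upstream mmseg), get_classes resolves the alias and dispatches to the matching *_classes() helper:

names = get_classes('voc')
print(len(names), names[0])  # 21 background
|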
def get_palette(dataset):
'Get class palette (RGB) of a dataset.'
alias2name = {}
for (name, aliases) in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if (dataset in alias2name):
labels = eval((alias2name[dataset] + '_palette()'))
else:
raise ValueError(f'Unrecognized dataset: {dataset}')
else:
raise TypeError(f'dataset must be a str, but got {type(dataset)}')
return labels
|
class EvalHook(Hook):
'Evaluation hook.\n\n Attributes:\n dataloader (DataLoader): A PyTorch dataloader.\n interval (int): Evaluation interval (by epochs). Default: 1.\n '
def __init__(self, dataloader, interval=1, **eval_kwargs):
if (not isinstance(dataloader, DataLoader)):
raise TypeError(f'dataloader must be a pytorch DataLoader, but got {type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.eval_kwargs = eval_kwargs
def after_train_iter(self, runner):
'After train iteration hook.'
if (not self.every_n_iters(runner, self.interval)):
return
from mmseg.apis import single_gpu_test
runner.log_buffer.clear()
results = single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def after_run(self, runner):
'After run hook.'
from mmseg.apis import single_gpu_test
runner.log_buffer.clear()
results = single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def evaluate(self, runner, results):
'Call evaluate function of dataset.'
eval_res = self.dataloader.dataset.evaluate(results, logger=runner.logger, **self.eval_kwargs)
for (name, val) in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
|
class DistEvalHook(EvalHook):
'Distributed evaluation hook.\n\n Attributes:\n dataloader (DataLoader): A PyTorch dataloader.\n interval (int): Evaluation interval (by epochs). Default: 1.\n tmpdir (str | None): Temporary directory to save the results of all\n processes. Default: None.\n gpu_collect (bool): Whether to use gpu or cpu to collect results.\n Default: False.\n '
def __init__(self, dataloader, interval=1, gpu_collect=False, **eval_kwargs):
if (not isinstance(dataloader, DataLoader)):
raise TypeError('dataloader must be a pytorch DataLoader, but got {}'.format(type(dataloader)))
self.dataloader = dataloader
self.interval = interval
self.gpu_collect = gpu_collect
self.eval_kwargs = eval_kwargs
def after_train_iter(self, runner):
'After train iteration hook.'
if (not self.every_n_iters(runner, self.interval)):
return
from mmseg.apis import multi_gpu_test
runner.log_buffer.clear()
results = multi_gpu_test(runner.model, self.dataloader, tmpdir=osp.join(runner.work_dir, '.eval_hook'), gpu_collect=self.gpu_collect, state=self.eval_kwargs['state'])
if (runner.rank == 0):
print('\n')
self.evaluate(runner, results)
def after_run(self, runner):
'After run hook.'
from mmseg.apis import multi_gpu_test
runner.log_buffer.clear()
results = multi_gpu_test(runner.model, self.dataloader, tmpdir=osp.join(runner.work_dir, '.eval_hook'), gpu_collect=self.gpu_collect, state=self.eval_kwargs['state'])
if (runner.rank == 0):
print('\n')
self.evaluate(runner, results)
|
def build_pixel_sampler(cfg, **default_args):
'Build pixel sampler for segmentation map.'
return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
|
class BasePixelSampler(metaclass=ABCMeta):
'Base class of pixel sampler.'
def __init__(self, **kwargs):
pass
@abstractmethod
def sample(self, seg_logit, seg_label):
'Placeholder for sample function.'
pass
|
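A minimal illustrative subclass of BasePixelSampler that weights every non-ignored pixel equally; context.ignore_index is an assumed attribute here, mirroring how OHEMPixelSampler below uses its context:

class UniformPixelSampler(BasePixelSampler):
    'Toy sampler: weight 1.0 for every valid pixel, 0.0 for ignored ones.'
    def __init__(self, context, **kwargs):
        super(UniformPixelSampler, self).__init__(**kwargs)
        self.context = context
    def sample(self, seg_logit, seg_label):
        # seg_label: (N, 1, H, W) -> per-pixel weight of shape (N, H, W)
        seg_label = seg_label.squeeze(1)
        return (seg_label != self.context.ignore_index).float()
|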
@PIXEL_SAMPLERS.register_module()
class OHEMPixelSampler(BasePixelSampler):
'Online Hard Example Mining Sampler for segmentation.\n\n Args:\n context (nn.Module): The context of sampler, subclass of\n :obj:`BaseDecodeHead`.\n thresh (float, optional): The threshold for hard example selection.\n Below which, are prediction with low confidence. If not\n specified, the hard examples will be pixels of top ``min_kept``\n loss. Default: None.\n min_kept (int, optional): The minimum number of predictions to keep.\n Default: 100000.\n '
def __init__(self, context, thresh=None, min_kept=100000):
super(OHEMPixelSampler, self).__init__()
self.context = context
assert (min_kept > 1)
self.thresh = thresh
self.min_kept = min_kept
def sample(self, seg_logit, seg_label):
'Sample pixels that have high loss or with low prediction confidence.\n\n Args:\n seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W)\n seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W)\n\n Returns:\n torch.Tensor: segmentation weight, shape (N, H, W)\n '
with torch.no_grad():
assert (seg_logit.shape[2:] == seg_label.shape[2:])
assert (seg_label.shape[1] == 1)
seg_label = seg_label.squeeze(1).long()
batch_kept = (self.min_kept * seg_label.size(0))
valid_mask = (seg_label != self.context.ignore_index)
seg_weight = seg_logit.new_zeros(size=seg_label.size())
valid_seg_weight = seg_weight[valid_mask]
if (self.thresh is not None):
seg_prob = F.softmax(seg_logit, dim=1)
tmp_seg_label = seg_label.clone().unsqueeze(1)
tmp_seg_label[(tmp_seg_label == self.context.ignore_index)] = 0
seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1)
(sort_prob, sort_indices) = seg_prob[valid_mask].sort()
if (sort_prob.numel() > 0):
min_threshold = sort_prob[min(batch_kept, (sort_prob.numel() - 1))]
else:
min_threshold = 0.0
threshold = max(min_threshold, self.thresh)
valid_seg_weight[(seg_prob[valid_mask] < threshold)] = 1.0
else:
losses = self.context.loss_decode(seg_logit, seg_label, weight=None, ignore_index=self.context.ignore_index, reduction_override='none')
(_, sort_indices) = losses[valid_mask].sort(descending=True)
valid_seg_weight[sort_indices[:batch_kept]] = 1.0
seg_weight[valid_mask] = valid_seg_weight
return seg_weight
|
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=(- 1)):
if (bucket_size_mb > 0):
bucket_size_bytes = ((bucket_size_mb * 1024) * 1024)
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if (tp not in buckets):
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for (tensor, synced) in zip(bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
|
def allreduce_grads(params, coalesce=True, bucket_size_mb=(- 1)):
'Allreduce gradients.\n\n Args:\n params (list[torch.Parameters]): List of parameters of a model\n coalesce (bool, optional): Whether allreduce parameters as a whole.\n Defaults to True.\n bucket_size_mb (int, optional): Size of bucket, the unit is MB.\n Defaults to -1.\n '
grads = [param.grad.data for param in params if (param.requires_grad and (param.grad is not None))]
world_size = dist.get_world_size()
if coalesce:
_allreduce_coalesced(grads, world_size, bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
|
def add_prefix(inputs, prefix):
'Add prefix for dict.\n\n Args:\n inputs (dict): The input dict with str keys.\n prefix (str): The prefix to add.\n\n Returns:\n\n dict: The dict with keys updated with ``prefix``.\n '
outputs = dict()
for (name, value) in inputs.items():
outputs[f'{prefix}.{name}'] = value
return outputs
|
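add_prefix is a one-liner for namespacing loss dicts before merging them, e.g. decode-head and auxiliary-head losses:

print(add_prefix({'loss_seg': 0.3, 'acc_seg': 0.9}, 'decode'))
# {'decode.loss_seg': 0.3, 'decode.acc_seg': 0.9}
|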
@DATASETS.register_module()
class AMDDataset(CustomDataset):
'Pascal VOC dataset.\n\n Args:\n split (str): Split txt file for Pascal VOC.\n '
CLASSES = ('background', 'foreground')
PALETTE = [[0, 0, 0], [128, 0, 0]]
def __init__(self, split, **kwargs):
super(AMDDataset, self).__init__(img_suffix='.jpg', seg_map_suffix='.jpg', split=split, **kwargs)
assert (osp.exists(self.img_dir) and (self.split is not None))
|
def _concat_dataset(cfg, default_args=None):
'Build :obj:`ConcatDataset` by concatenating multiple datasets.'
from .dataset_wrappers import ConcatDataset
img_dir = cfg['img_dir']
ann_dir = cfg.get('ann_dir', None)
split = cfg.get('split', None)
num_img_dir = (len(img_dir) if isinstance(img_dir, (list, tuple)) else 1)
if (ann_dir is not None):
num_ann_dir = (len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1)
else:
num_ann_dir = 0
if (split is not None):
num_split = (len(split) if isinstance(split, (list, tuple)) else 1)
else:
num_split = 0
if (num_img_dir > 1):
assert ((num_img_dir == num_ann_dir) or (num_ann_dir == 0))
assert ((num_img_dir == num_split) or (num_split == 0))
else:
assert ((num_split == num_ann_dir) or (num_ann_dir <= 1))
num_dset = max(num_split, num_img_dir)
datasets = []
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
if isinstance(img_dir, (list, tuple)):
data_cfg['img_dir'] = img_dir[i]
if isinstance(ann_dir, (list, tuple)):
data_cfg['ann_dir'] = ann_dir[i]
if isinstance(split, (list, tuple)):
data_cfg['split'] = split[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets)
|
def build_dataset(cfg, default_args=None):
'Build datasets.'
from .dataset_wrappers import ConcatDataset, RepeatDataset
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif (cfg['type'] == 'RepeatDataset'):
dataset = RepeatDataset(build_dataset(cfg['dataset'], default_args), cfg['times'])
elif (isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(cfg.get('split', None), (list, tuple))):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
|
def build_dataloader(dataset, samples_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, drop_last=False, pin_memory=True, dataloader_type='PoolDataLoader', **kwargs):
"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n seed (int | None): Seed to be used. Default: None.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: False\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader'\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n "
(rank, world_size) = get_dist_info()
if dist:
sampler = DistributedSampler(dataset, world_size, rank, shuffle=shuffle)
shuffle = False
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
sampler = None
batch_size = (num_gpus * samples_per_gpu)
num_workers = (num_gpus * workers_per_gpu)
init_fn = (partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if (seed is not None) else None)
assert (dataloader_type in ('DataLoader', 'PoolDataLoader')), f'unsupported dataloader {dataloader_type}'
if (dataloader_type == 'PoolDataLoader'):
dataloader = PoolDataLoader
elif (dataloader_type == 'DataLoader'):
dataloader = DataLoader
data_loader = dataloader(dataset, batch_size=batch_size, sampler=sampler, num_workers=num_workers, collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), pin_memory=pin_memory, shuffle=shuffle, worker_init_fn=init_fn, drop_last=drop_last, **kwargs)
return data_loader
|
def worker_init_fn(worker_id, num_workers, rank, seed):
'Worker init func for dataloader.\n\n The seed of each worker equals to num_worker * rank + worker_id + user_seed\n\n Args:\n worker_id (int): Worker id.\n num_workers (int): Number of workers.\n rank (int): The rank of current process.\n seed (int): The random seed to use.\n '
worker_seed = (((num_workers * rank) + worker_id) + seed)
np.random.seed(worker_seed)
random.seed(worker_seed)
|
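The seed arithmetic guarantees distinct worker seeds across processes: with 4 workers per process, worker 2 of rank 1 under base seed 42 is seeded with 4 * 1 + 2 + 42 = 48:

worker_init_fn(worker_id=2, num_workers=4, rank=1, seed=42)  # seeds numpy/random with 48
|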
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
'A wrapper of concatenated dataset.\n\n Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but\n concat the group flag for image aspect ratio.\n\n Args:\n datasets (list[:obj:`Dataset`]): A list of datasets.\n '
def __init__(self, datasets):
super(ConcatDataset, self).__init__(datasets)
self.CLASSES = datasets[0].CLASSES
self.PALETTE = datasets[0].PALETTE
|
@DATASETS.register_module()
class RepeatDataset(object):
'A wrapper of repeated dataset.\n\n The length of repeated dataset will be `times` larger than the original\n dataset. This is useful when the data loading time is long but the dataset\n is small. Using RepeatDataset can reduce the data loading time between\n epochs.\n\n Args:\n dataset (:obj:`Dataset`): The dataset to be repeated.\n times (int): Repeat times.\n '
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self.CLASSES = dataset.CLASSES
self.PALETTE = dataset.PALETTE
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
'Get item from original dataset.'
return self.dataset[(idx % self._ori_len)]
def __len__(self):
'The length is multiplied by ``times``'
return (self.times * self._ori_len)
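# Illustrative sketch (not part of the original code): RepeatDataset wraps a
# hypothetical 3-item dataset 4 times, so len() is 12 and index 7 wraps back
# to item 7 % 3 == 1.
def _demo_repeat_dataset():
    class _ToyDataset:
        CLASSES = ('thing',)
        PALETTE = [[255, 0, 0]]
        def __init__(self):
            self.data = ['a', 'b', 'c']
        def __len__(self):
            return len(self.data)
        def __getitem__(self, idx):
            return self.data[idx]
    repeated = RepeatDataset(_ToyDataset(), times=4)
    assert len(repeated) == 12
    assert repeated[7] == 'b'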
|
@PIPELINES.register_module()
class Compose(object):
'Compose multiple transforms sequentially.\n\n Args:\n transforms (Sequence[dict | callable]): Sequence of transform object or\n config dict to be composed.\n '
def __init__(self, transforms):
assert isinstance(transforms, collections.abc.Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, data):
'Call function to apply transforms sequentially.\n\n Args:\n data (dict): A result dict contains the data to transform.\n\n Returns:\n dict: Transformed data.\n '
for t in self.transforms:
data = t(data)
if (data is None):
return None
return data
def __repr__(self):
format_string = (self.__class__.__name__ + '(')
for t in self.transforms:
format_string += '\n'
format_string += f' {t}'
format_string += '\n)'
return format_string
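# Illustrative sketch (not part of the original code): Compose also accepts
# plain callables, chaining them over the result dict; any transform that
# returns None aborts the pipeline.
def _demo_compose():
    def double(results):
        return {**results, 'x': results['x'] * 2}
    def add_one(results):
        return {**results, 'x': results['x'] + 1}
    pipeline = Compose([double, add_one])
    assert pipeline({'x': 3})['x'] == 7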
|
def to_tensor(data):
'Convert objects of various python types to :obj:`torch.Tensor`.\n\n Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,\n :class:`Sequence`, :class:`int` and :class:`float`.\n\n Args:\n data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to\n be converted.\n '
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif (isinstance(data, Sequence) and (not mmcv.is_str(data))):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
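# Illustrative sketch (not part of the original code): to_tensor maps each
# supported type onto the expected tensor dtype/shape.
def _demo_to_tensor():
    assert tuple(to_tensor(np.zeros((2, 2))).shape) == (2, 2)
    assert to_tensor([1, 2, 3]).tolist() == [1, 2, 3]
    assert to_tensor(5).dtype == torch.int64  # LongTensor
    assert to_tensor(0.5).dtype == torch.float32  # FloatTensor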
|
@PIPELINES.register_module()
class ToTensor(object):
'Convert some results to :obj:`torch.Tensor` by given keys.\n\n Args:\n keys (Sequence[str]): Keys that need to be converted to Tensor.\n '
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
'Call function to convert data in results to :obj:`torch.Tensor`.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data converted\n to :obj:`torch.Tensor`.\n '
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return (self.__class__.__name__ + f'(keys={self.keys})')
|
@PIPELINES.register_module()
class ImageToTensor(object):
    'Convert image to :obj:`torch.Tensor` by given keys.\n\n The dimension order of input image is (H, W, C). The pipeline will convert\n it to (C, H, W). If only 2 dimensions (H, W) are given, the output will be\n (1, H, W).\n\n Args:\n keys (Sequence[str]): Keys of images to be converted to Tensor.\n '
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
'Call function to convert image in results to :obj:`torch.Tensor` and\n transpose the channel order.\n\n Args:\n results (dict): Result dict contains the image data to convert.\n\n Returns:\n dict: The result dict contains the image converted\n to :obj:`torch.Tensor` and transposed to (C, H, W) order.\n '
for key in self.keys:
img = results[key]
if (len(img.shape) < 3):
img = np.expand_dims(img, (- 1))
results[key] = to_tensor(img.transpose(2, 0, 1))
return results
def __repr__(self):
return (self.__class__.__name__ + f'(keys={self.keys})')
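# Illustrative sketch (not part of the original code): an (H, W, C) array is
# transposed to (C, H, W), and a 2-D (H, W) map comes out as (1, H, W).
def _demo_image_to_tensor():
    t = ImageToTensor(keys=['img'])
    out = t({'img': np.zeros((4, 6, 3), dtype=np.uint8)})
    assert tuple(out['img'].shape) == (3, 4, 6)
    out = t({'img': np.zeros((4, 6), dtype=np.uint8)})
    assert tuple(out['img'].shape) == (1, 4, 6)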
|
@PIPELINES.register_module()
class Transpose(object):
'Transpose some results by given keys.\n\n Args:\n keys (Sequence[str]): Keys of results to be transposed.\n order (Sequence[int]): Order of transpose.\n '
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
'Call function to convert image in results to :obj:`torch.Tensor` and\n transpose the channel order.\n\n Args:\n results (dict): Result dict contains the image data to convert.\n\n Returns:\n dict: The result dict contains the image converted\n to :obj:`torch.Tensor` and transposed to (C, H, W) order.\n '
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return (self.__class__.__name__ + f'(keys={self.keys}, order={self.order})')
|
@PIPELINES.register_module()
class ToDataContainer(object):
"Convert results to :obj:`mmcv.DataContainer` by given fields.\n\n Args:\n fields (Sequence[dict]): Each field is a dict like\n ``dict(key='xxx', **kwargs)``. The ``key`` in result will\n be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.\n Default: ``(dict(key='img', stack=True),\n dict(key='gt_semantic_seg'))``.\n "
def __init__(self, fields=(dict(key='img', stack=True), dict(key='gt_semantic_seg'))):
self.fields = fields
def __call__(self, results):
'Call function to convert data in results to\n :obj:`mmcv.DataContainer`.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data converted to\n :obj:`mmcv.DataContainer`.\n '
for field in self.fields:
field = field.copy()
key = field.pop('key')
results[key] = DC(results[key], **field)
return results
def __repr__(self):
return (self.__class__.__name__ + f'(fields={self.fields})')
|
@PIPELINES.register_module()
class DefaultFormatBundle(object):
'Default formatting bundle.\n\n It simplifies the pipeline of formatting common fields, including "img"\n and "gt_semantic_seg". These fields are formatted as follows.\n\n - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)\n - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,\n (3)to DataContainer (stack=True)\n '
def __call__(self, results):
'Call function to transform and format common fields in results.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data that is formatted with\n default bundle.\n '
if ('img' in results):
img = results['img']
if (len(img.shape) < 3):
img = np.expand_dims(img, (- 1))
if (len(img.shape) == 4):
img = np.ascontiguousarray(img.transpose(0, 3, 1, 2))
else:
assert (len(img.shape) == 3)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
results['img'] = DC(to_tensor(img), stack=True)
if ('gt_semantic_seg' in results):
results['gt_semantic_seg'] = DC(to_tensor(results['gt_semantic_seg'][(None, ...)].astype(np.int64)), stack=True)
return results
def __repr__(self):
return self.__class__.__name__
|
@PIPELINES.register_module()
class Collect(object):
    'Collect data from the loader relevant to the specific task.\n\n This is usually the last stage of the data loader pipeline. Typically keys\n is set to some subset of "img", "gt_semantic_seg".\n\n The "img_meta" item is always populated. The contents of the "img_meta"\n dictionary depend on "meta_keys". By default this includes:\n\n - "img_shape": shape of the image input to the network as a tuple\n (h, w, c). Note that images may be zero padded on the bottom/right\n if the batch tensor is larger than this shape.\n\n - "scale_factor": a float indicating the preprocessing scale\n\n - "flip": a boolean indicating if image flip transform was used\n\n - "filename": path to the image file\n\n - "ori_shape": original shape of the image as a tuple (h, w, c)\n\n - "pad_shape": image shape after padding\n\n - "img_norm_cfg": a dict of normalization information:\n - mean - per channel mean subtraction\n - std - per channel std divisor\n - to_rgb - bool indicating if bgr was converted to rgb\n\n Args:\n keys (Sequence[str]): Keys of results to be collected in ``data``.\n meta_keys (Sequence[str], optional): Meta keys to be converted to\n ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n Default: ``(\'filename\', \'ori_filename\', \'ori_shape\', \'img_shape\',\n \'pad_shape\', \'scale_factor\', \'img_norm_cfg\')``\n '
def __init__(self, keys, meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'img_norm_cfg')):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
'Call function to collect keys in results. The keys in ``meta_keys``\n will be converted to :obj:mmcv.DataContainer.\n\n Args:\n results (dict): Result dict contains the data to collect.\n\n Returns:\n dict: The result dict contains the following keys\n - keys in``self.keys``\n - ``img_metas``\n '
data = {}
img_meta = {}
for key in self.meta_keys:
img_meta[key] = results[key]
data['img_metas'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return (self.__class__.__name__ + f'(keys={self.keys}, meta_keys={self.meta_keys})')
|
@PIPELINES.register_module()
class LoadImageFromFile(object):
    'Load frames of a video clip from file.\n\n Required keys are "img_prefix" and "img_info" (a dict that must contain the\n key "filename", here a ``(folder, frame_name_list)`` pair). Added or\n updated keys are "filename", "img", "img_shape", "ori_shape" (same as\n `img_shape`), "pad_shape" (same as `img_shape`), "scale_factor" (1.0) and\n "img_norm_cfg" (means=0 and stds=1).\n\n Args:\n load_num (int): Number of frames to load per clip at training time.\n Defaults to 2.\n step_limit (int): Upper bound of the random frame stride used at\n training time. Defaults to 1.\n is_train (bool): If True, the start frame and stride are sampled\n randomly; otherwise a single frame is loaded deterministically.\n Defaults to True.\n use_gauss_blur (bool): Whether to apply a Gaussian blur to each\n loaded frame. Defaults to False.\n to_float32 (bool): Whether to convert the loaded image to a float32\n numpy array. If set to False, the loaded image is an uint8 array.\n Defaults to False.\n color_type (str): The flag argument for :func:`mmcv.imfrombytes`.\n Defaults to \'color\'.\n file_client_args (dict): Arguments to instantiate a FileClient.\n See :class:`mmcv.fileio.FileClient` for details.\n Defaults to ``dict(backend=\'disk\')``.\n imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:\n \'cv2\'\n '
def __init__(self, load_num=2, step_limit=1, is_train=True, use_gauss_blur=False, to_float32=False, color_type='color', file_client_args=dict(backend='disk'), imdecode_backend='cv2'):
self.use_gauss_blur = use_gauss_blur
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
self.is_train = is_train
self.load_num = load_num
self.step_limit = step_limit
def __call__(self, results):
'Call functions to load image and get image meta information.\n\n Args:\n results (dict): Result dict from :obj:`mmseg.CustomDataset`.\n\n Returns:\n dict: The dict contains loaded image and meta information.\n '
if (self.file_client is None):
self.file_client = mmcv.FileClient(**self.file_client_args)
if (results.get('img_prefix') is not None):
filename = osp.join(results['img_prefix'], results['img_info']['filename'])
else:
filename = results['img_info']['filename']
(folder, fn_list) = filename
if self.is_train:
st = random.randint(0, (len(fn_list) - 1))
num = self.load_num
step = random.randint(1, self.step_limit)
else:
st = 0
num = 1
step = 1
if (len(fn_list[st:]) < (step * num)):
print('[frame num] less than step*num len=', len(fn_list), 'st=', st)
while (len(fn_list[st:]) < (step * num)):
fn_list = (fn_list + fn_list[::(- 1)])
fn_list = [fn_list[it] for it in range(st, (st + (num * step)), step)]
imgs = []
for fn in fn_list:
fp_fn = os.path.join(folder, fn)
img_bytes = self.file_client.get(fp_fn)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
if self.use_gauss_blur:
import scipy.ndimage as ndimage
img = ndimage.gaussian_filter(img, sigma=(3, 3, 0), order=0)
if (img.shape[0] > img.shape[1]):
img = np.transpose(img, [1, 0, 2])
imgs.append(img)
results['filename'] = filename
results['ori_filename'] = results['img_info']['filename']
results['img'] = imgs
results['img_shape'] = imgs[0].shape
results['ori_shape'] = imgs[0].shape
results['pad_shape'] = imgs[0].shape
results['scale_factor'] = 1.0
num_channels = 3
results['img_norm_cfg'] = dict(mean=np.zeros(num_channels, dtype=np.float32), std=np.ones(num_channels, dtype=np.float32), to_rgb=False)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(to_float32={self.to_float32},'
repr_str += f"color_type='{self.color_type}',"
repr_str += f"imdecode_backend='{self.imdecode_backend}')"
return repr_str
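# Illustrative sketch (not part of the original code): unlike the standard
# mmseg loader, __call__ above expects 'filename' to be a
# (folder, frame_name_list) pair describing a clip; the paths below are
# hypothetical. In test mode (is_train=False) only the first frame is read.
def _demo_load_image_inputs():
    results = dict(img_prefix=None,
                   img_info=dict(filename=('/data/clips/clip_0001',
                                           ['frame_000.jpg',
                                            'frame_001.jpg'])))
    loader = LoadImageFromFile(load_num=2, step_limit=1, is_train=False)
    return loader, results  # loader(results) would read frame_000.jpg only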
|
@PIPELINES.register_module()
class LoadAnnotations(object):
"Load annotations for semantic segmentation.\n\n Args:\n reduct_zero_label (bool): Whether reduce all label value by 1.\n Usually used for datasets where 0 is background label.\n Default: False.\n file_client_args (dict): Arguments to instantiate a FileClient.\n See :class:`mmcv.fileio.FileClient` for details.\n Defaults to ``dict(backend='disk')``.\n imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:\n 'pillow'\n "
def __init__(self, reduce_zero_label=False, file_client_args=dict(backend='disk'), imdecode_backend='pillow'):
self.reduce_zero_label = reduce_zero_label
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
def __call__(self, results):
'Call function to load multiple types annotations.\n\n Args:\n results (dict): Result dict from :obj:`mmseg.CustomDataset`.\n\n Returns:\n dict: The dict contains loaded semantic segmentation annotations.\n '
if (self.file_client is None):
self.file_client = mmcv.FileClient(**self.file_client_args)
if (results.get('seg_prefix', None) is not None):
filename1 = osp.join(results['seg_prefix'], results['ann_info']['seg_map'])
else:
filename1 = results['ann_info']['seg_map']
img_bytes = self.file_client.get(filename1)
flow_x = mmcv.imfrombytes(img_bytes, flag='unchanged', backend=self.imdecode_backend).squeeze().astype(np.uint8)
if (flow_x.shape[0] > flow_x.shape[1]):
flow_x = np.transpose(flow_x, [1, 0, 2])
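        # NOTE: only one flow map is decoded here; flow_y is aliased to
        # flow_x, so both seg fields carry the same array.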
flow_x_bin = flow_x
flow_y_bin = flow_x_bin
results['flow_x'] = flow_x_bin
results['flow_y'] = flow_y_bin
results['seg_fields'].append('flow_x')
results['seg_fields'].append('flow_y')
return results
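        # NOTE: the early return above makes the remainder of this method
        # unreachable; it is kept as the original single-map gt_semantic_seg
        # loading path.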
img_bytes = self.file_client.get(filename1)
gt_semantic_seg = mmcv.imfrombytes(img_bytes, flag='unchanged', backend=self.imdecode_backend).squeeze().astype(np.uint8)
if self.reduce_zero_label:
gt_semantic_seg[(gt_semantic_seg == 0)] = 255
gt_semantic_seg = (gt_semantic_seg - 1)
gt_semantic_seg[(gt_semantic_seg == 254)] = 255
results['gt_semantic_seg'] = gt_semantic_seg
results['seg_fields'].append('gt_semantic_seg')
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(reduce_zero_label={self.reduce_zero_label},'
repr_str += f"imdecode_backend='{self.imdecode_backend}')"
return repr_str
|
@PIPELINES.register_module()
class MultiScaleFlipAug(object):
    'Test-time augmentation with multiple scales and flipping.\n\n An example configuration is as follows:\n\n .. code-block::\n\n img_scale=(2048, 1024),\n img_ratios=[0.5, 1.0],\n flip=True,\n transforms=[\n dict(type=\'Resize\', keep_ratio=True),\n dict(type=\'RandomFlip\'),\n dict(type=\'Normalize\', **img_norm_cfg),\n dict(type=\'Pad\', size_divisor=32),\n dict(type=\'ImageToTensor\', keys=[\'img\']),\n dict(type=\'Collect\', keys=[\'img\']),\n ]\n\n After MultiScaleFlipAug with the above configuration, the results are\n wrapped into lists of the same length as follows:\n\n .. code-block::\n\n dict(\n img=[...],\n img_shape=[...],\n scale=[(1024, 512), (1024, 512), (2048, 1024), (2048, 1024)]\n flip=[False, True, False, True]\n ...\n )\n\n Args:\n transforms (list[dict]): Transforms to apply in each augmentation.\n img_scale (tuple | list[tuple]): Image scales for resizing.\n img_ratios (float | list[float]): Image ratios for resizing.\n flip (bool): Whether to apply flip augmentation. Default: False.\n flip_direction (str | list[str]): Flip augmentation directions,\n options are "horizontal" and "vertical". If flip_direction is list,\n multiple flip augmentations will be applied.\n It has no effect when flip == False. Default: "horizontal".\n '
def __init__(self, transforms, img_scale, img_ratios=None, flip=False, flip_direction='horizontal'):
self.transforms = Compose(transforms)
if (img_ratios is not None):
img_ratios = (img_ratios if isinstance(img_ratios, list) else [img_ratios])
assert mmcv.is_list_of(img_ratios, float)
assert (isinstance(img_scale, tuple) and (len(img_scale) == 2))
self.img_scale = [(int((img_scale[0] * ratio)), int((img_scale[1] * ratio))) for ratio in img_ratios]
else:
self.img_scale = (img_scale if isinstance(img_scale, list) else [img_scale])
assert mmcv.is_list_of(self.img_scale, tuple)
self.flip = flip
self.flip_direction = (flip_direction if isinstance(flip_direction, list) else [flip_direction])
assert mmcv.is_list_of(self.flip_direction, str)
if ((not self.flip) and (self.flip_direction != ['horizontal'])):
warnings.warn('flip_direction has no effect when flip is set to False')
if (self.flip and (not any([(t['type'] == 'RandomFlip') for t in transforms]))):
warnings.warn('flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
'Call function to apply test time augment transforms on results.\n\n Args:\n results (dict): Result dict contains the data to transform.\n\n Returns:\n dict[str: list]: The augmented data, where each value is wrapped\n into a list.\n '
aug_data = []
flip_aug = ([False, True] if self.flip else [False])
for scale in self.img_scale:
for flip in flip_aug:
for direction in self.flip_direction:
_results = results.copy()
_results['scale'] = scale
_results['flip'] = flip
_results['flip_direction'] = direction
data = self.transforms(_results)
aug_data.append(data)
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for (key, val) in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
        repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
        repr_str += f'flip_direction={self.flip_direction})'
return repr_str
|
class BasicBlock(nn.Module):
'Basic block for ResNet.'
expansion = 1
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None):
super(BasicBlock, self).__init__()
assert (dcn is None), 'Not implemented yet.'
assert (plugins is None), 'Not implemented yet.'
(self.norm1_name, norm1) = build_norm_layer(norm_cfg, planes, postfix=1)
(self.norm2_name, norm2) = build_norm_layer(norm_cfg, planes, postfix=2)
self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, 3, stride=stride, padding=dilation, dilation=dilation, bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(conv_cfg, planes, planes, 3, padding=1, bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
@property
def norm1(self):
'nn.Module: normalization layer after the first convolution layer'
return getattr(self, self.norm1_name)
@property
def norm2(self):
'nn.Module: normalization layer after the second convolution layer'
return getattr(self, self.norm2_name)
def forward(self, x):
'Forward function.'
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
return out
if (self.with_cp and x.requires_grad):
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
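# Illustrative sketch (not part of the original code): a stride-1 BasicBlock
# preserves both channel count and spatial size. Assumes mmcv's
# build_conv_layer/build_norm_layer (already required above) are importable.
def _demo_basic_block():
    block = BasicBlock(inplanes=64, planes=64).eval()
    out = block(torch.rand(1, 64, 8, 8))
    assert tuple(out.shape) == (1, 64, 8, 8)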
|
class Bottleneck(nn.Module):
'Bottleneck block for ResNet.\n\n If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is\n "caffe", the stride-two layer is the first 1x1 conv layer.\n '
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None):
super(Bottleneck, self).__init__()
assert (style in ['pytorch', 'caffe'])
assert ((dcn is None) or isinstance(dcn, dict))
assert ((plugins is None) or isinstance(plugins, list))
if (plugins is not None):
allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
assert all(((p['position'] in allowed_position) for p in plugins))
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.dcn = dcn
self.with_dcn = (dcn is not None)
self.plugins = plugins
self.with_plugins = (plugins is not None)
if self.with_plugins:
self.after_conv1_plugins = [plugin['cfg'] for plugin in plugins if (plugin['position'] == 'after_conv1')]
self.after_conv2_plugins = [plugin['cfg'] for plugin in plugins if (plugin['position'] == 'after_conv2')]
self.after_conv3_plugins = [plugin['cfg'] for plugin in plugins if (plugin['position'] == 'after_conv3')]
if (self.style == 'pytorch'):
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
(self.norm1_name, norm1) = build_norm_layer(norm_cfg, planes, postfix=1)
(self.norm2_name, norm2) = build_norm_layer(norm_cfg, planes, postfix=2)
(self.norm3_name, norm3) = build_norm_layer(norm_cfg, (planes * self.expansion), postfix=3)
self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, kernel_size=1, stride=self.conv1_stride, bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = dcn.pop('fallback_on_stride', False)
if ((not self.with_dcn) or fallback_on_stride):
self.conv2 = build_conv_layer(conv_cfg, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False)
else:
assert (self.conv_cfg is None), 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(dcn, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(conv_cfg, planes, (planes * self.expansion), kernel_size=1, bias=False)
self.add_module(self.norm3_name, norm3)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
if self.with_plugins:
self.after_conv1_plugin_names = self.make_block_plugins(planes, self.after_conv1_plugins)
self.after_conv2_plugin_names = self.make_block_plugins(planes, self.after_conv2_plugins)
self.after_conv3_plugin_names = self.make_block_plugins((planes * self.expansion), self.after_conv3_plugins)
def make_block_plugins(self, in_channels, plugins):
'make plugins for block.\n\n Args:\n in_channels (int): Input channels of plugin.\n plugins (list[dict]): List of plugins cfg to build.\n\n Returns:\n list[str]: List of the names of plugin.\n '
assert isinstance(plugins, list)
plugin_names = []
for plugin in plugins:
plugin = plugin.copy()
(name, layer) = build_plugin_layer(plugin, in_channels=in_channels, postfix=plugin.pop('postfix', ''))
assert (not hasattr(self, name)), f'duplicate plugin {name}'
self.add_module(name, layer)
plugin_names.append(name)
return plugin_names
def forward_plugin(self, x, plugin_names):
'Forward function for plugins.'
out = x
for name in plugin_names:
            out = getattr(self, name)(out)
return out
@property
def norm1(self):
'nn.Module: normalization layer after the first convolution layer'
return getattr(self, self.norm1_name)
@property
def norm2(self):
'nn.Module: normalization layer after the second convolution layer'
return getattr(self, self.norm2_name)
@property
def norm3(self):
'nn.Module: normalization layer after the third convolution layer'
return getattr(self, self.norm3_name)
def forward(self, x):
'Forward function.'
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
return out
if (self.with_cp and x.requires_grad):
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
|
@BACKBONES.register_module()
class ResNet(nn.Module):
    'ResNet backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n in_channels (int): Number of input image channels. Default: 3.\n stem_channels (int): Number of stem channels. Default: 64.\n base_channels (int): Number of base channels of res layer. Default: 64.\n num_stages (int): Resnet stages, normally 4.\n strides (Sequence[int]): Strides of the first block of each stage.\n dilations (Sequence[int]): Dilation of each stage.\n out_indices (Sequence[int]): Output from which stages.\n style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two\n layer is the 3x3 conv layer, otherwise the stride-two layer is\n the first 1x1 conv layer.\n deep_stem (bool): Replace 7x7 conv in input stem with three 3x3 convs.\n avg_down (bool): Use AvgPool instead of stride conv when\n downsampling in the bottleneck.\n frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n -1 means not freezing any parameters.\n norm_cfg (dict): Dictionary to construct and config norm layer.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only.\n plugins (list[dict]): List of plugins for stages, each dict contains:\n\n - cfg (dict, required): Cfg dict to build plugin.\n\n - position (str, required): Position inside block to insert plugin,\n options: \'after_conv1\', \'after_conv2\', \'after_conv3\'.\n\n - stages (tuple[bool], optional): Stages to apply plugin, length\n should be same as \'num_stages\'\n multi_grid (Sequence[int]|None): Multi grid dilation rates of last\n stage. Default: None\n contract_dilation (bool): Whether to contract the first dilation of\n each layer. Default: False\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed.\n zero_init_residual (bool): Whether to use zero init for last norm layer\n in resblocks to let them behave as identity.\n\n Example:\n >>> from mmseg.models import ResNet\n >>> import torch\n >>> self = ResNet(depth=18)\n >>> self.eval()\n >>> inputs = torch.rand(1, 3, 32, 32)\n >>> level_outputs = self.forward(inputs)\n >>> for level_out in level_outputs:\n ... print(tuple(level_out.shape))\n (1, 64, 8, 8)\n (1, 128, 4, 4)\n (1, 256, 2, 2)\n (1, 512, 1, 1)\n '
arch_settings = {18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}
def __init__(self, depth, in_channels=3, stem_channels=64, base_channels=64, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=False, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, multi_grid=None, contract_dilation=False, with_cp=False, zero_init_residual=True):
super(ResNet, self).__init__()
if (depth not in self.arch_settings):
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.stem_channels = stem_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert ((num_stages >= 1) and (num_stages <= 4))
self.strides = strides
self.dilations = dilations
assert (len(strides) == len(dilations) == num_stages)
self.out_indices = out_indices
assert (max(out_indices) < num_stages)
self.style = style
self.deep_stem = deep_stem
self.avg_down = avg_down
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.dcn = dcn
self.stage_with_dcn = stage_with_dcn
if (dcn is not None):
assert (len(stage_with_dcn) == num_stages)
self.plugins = plugins
self.multi_grid = multi_grid
self.contract_dilation = contract_dilation
self.zero_init_residual = zero_init_residual
(self.block, stage_blocks) = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = stem_channels
self._make_stem_layer(in_channels, stem_channels)
self.res_layers = []
for (i, num_blocks) in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
dcn = (self.dcn if self.stage_with_dcn[i] else None)
if (plugins is not None):
stage_plugins = self.make_stage_plugins(plugins, i)
else:
stage_plugins = None
stage_multi_grid = (multi_grid if (i == (len(self.stage_blocks) - 1)) else None)
planes = (base_channels * (2 ** i))
res_layer = self.make_res_layer(block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, plugins=stage_plugins, multi_grid=stage_multi_grid, contract_dilation=contract_dilation)
self.inplanes = (planes * self.block.expansion)
layer_name = f'layer{(i + 1)}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = ((self.block.expansion * base_channels) * (2 ** (len(self.stage_blocks) - 1)))
def make_stage_plugins(self, plugins, stage_idx):
"make plugins for ResNet 'stage_idx'th stage .\n\n Currently we support to insert 'context_block',\n 'empirical_attention_block', 'nonlocal_block' into the backbone like\n ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of\n Bottleneck.\n\n An example of plugins format could be :\n >>> plugins=[\n ... dict(cfg=dict(type='xxx', arg1='xxx'),\n ... stages=(False, True, True, True),\n ... position='after_conv2'),\n ... dict(cfg=dict(type='yyy'),\n ... stages=(True, True, True, True),\n ... position='after_conv3'),\n ... dict(cfg=dict(type='zzz', postfix='1'),\n ... stages=(True, True, True, True),\n ... position='after_conv3'),\n ... dict(cfg=dict(type='zzz', postfix='2'),\n ... stages=(True, True, True, True),\n ... position='after_conv3')\n ... ]\n >>> self = ResNet(depth=18)\n >>> stage_plugins = self.make_stage_plugins(plugins, 0)\n >>> assert len(stage_plugins) == 3\n\n Suppose 'stage_idx=0', the structure of blocks in the stage would be:\n conv1-> conv2->conv3->yyy->zzz1->zzz2\n Suppose 'stage_idx=1', the structure of blocks in the stage would be:\n conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2\n\n If stages is missing, the plugin would be applied to all stages.\n\n Args:\n plugins (list[dict]): List of plugins cfg to build. The postfix is\n required if multiple same type plugins are inserted.\n stage_idx (int): Index of stage to build\n\n Returns:\n list[dict]: Plugins for current stage\n "
stage_plugins = []
for plugin in plugins:
plugin = plugin.copy()
stages = plugin.pop('stages', None)
assert ((stages is None) or (len(stages) == self.num_stages))
if ((stages is None) or stages[stage_idx]):
stage_plugins.append(plugin)
return stage_plugins
def make_res_layer(self, **kwargs):
'Pack all blocks in a stage into a ``ResLayer``.'
return ResLayer(**kwargs)
@property
def norm1(self):
'nn.Module: the normalization layer named "norm1" '
return getattr(self, self.norm1_name)
def _make_stem_layer(self, in_channels, stem_channels):
'Make stem layer for ResNet.'
if self.deep_stem:
self.stem = nn.Sequential(build_conv_layer(self.conv_cfg, in_channels, (stem_channels // 2), kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, (stem_channels // 2))[1], nn.ReLU(inplace=True), build_conv_layer(self.conv_cfg, (stem_channels // 2), (stem_channels // 2), kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, (stem_channels // 2))[1], nn.ReLU(inplace=True), build_conv_layer(self.conv_cfg, (stem_channels // 2), stem_channels, kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels)[1], nn.ReLU(inplace=True))
else:
self.conv1 = build_conv_layer(self.conv_cfg, in_channels, stem_channels, kernel_size=7, stride=2, padding=3, bias=False)
(self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, stem_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def _freeze_stages(self):
'Freeze stages param and norm stats.'
if (self.frozen_stages >= 0):
if self.deep_stem:
self.stem.eval()
for param in self.stem.parameters():
param.requires_grad = False
else:
self.norm1.eval()
for m in [self.conv1, self.norm1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, (self.frozen_stages + 1)):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self, pretrained=None):
'Initialize the weights in backbone.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n '
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif (pretrained is None):
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if (self.dcn is not None):
for m in self.modules():
if (isinstance(m, Bottleneck) and hasattr(m, 'conv2_offset')):
constant_init(m.conv2_offset, 0)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
'Forward function.'
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for (i, layer_name) in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if (i in self.out_indices):
outs.append(x)
return tuple(outs)
def train(self, mode=True):
        'Convert the model into training mode while keeping the normalization\n layers frozen.'
super(ResNet, self).train(mode)
self._freeze_stages()
if (mode and self.norm_eval):
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
|
@BACKBONES.register_module()
class ResNetV1c(ResNet):
'ResNetV1c variant described in [1]_.\n\n Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv\n in the input stem with three 3x3 convs.\n\n References:\n .. [1] https://arxiv.org/pdf/1812.01187.pdf\n '
def __init__(self, **kwargs):
super(ResNetV1c, self).__init__(deep_stem=True, avg_down=False, **kwargs)
|
@BACKBONES.register_module()
class ResNetV1d(ResNet):
'ResNetV1d variant described in [1]_.\n\n Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in\n the input stem with three 3x3 convs. And in the downsampling block, a 2x2\n avg_pool with stride 2 is added before conv, whose stride is changed to 1.\n '
def __init__(self, **kwargs):
super(ResNetV1d, self).__init__(deep_stem=True, avg_down=True, **kwargs)
|
def build(cfg, registry, default_args=None):
    'Build a module.\n\n Args:\n cfg (dict, list[dict]): The config of modules; it is either a dict\n or a list of configs.\n registry (:obj:`Registry`): A registry the module belongs to.\n default_args (dict, optional): Default arguments to build the module.\n Defaults to None.\n\n Returns:\n nn.Module: A built nn module.\n '
if isinstance(cfg, list):
modules = [build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg]
return nn.Sequential(*modules)
else:
return build_from_cfg(cfg, registry, default_args)
|
def build_backbone(cfg):
'Build backbone.'
return build(cfg, BACKBONES)
|
def build_neck(cfg):
'Build neck.'
return build(cfg, NECKS)
|
def build_head(cfg):
'Build head.'
return build(cfg, HEADS)
|
def build_loss(cfg):
'Build loss.'
return build(cfg, LOSSES)
|
def build_segmentor(cfg, train_cfg=None, test_cfg=None):
'Build segmentor.'
return build(cfg, SEGMENTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
|
class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
"Base class for BaseDecodeHead.\n\n Args:\n in_channels (int|Sequence[int]): Input channels.\n channels (int): Channels after modules, before conv_seg.\n num_classes (int): Number of classes.\n dropout_ratio (float): Ratio of dropout layer. Default: 0.1.\n conv_cfg (dict|None): Config of conv layers. Default: None.\n norm_cfg (dict|None): Config of norm layers. Default: None.\n act_cfg (dict): Config of activation layers.\n Default: dict(type='ReLU')\n in_index (int|Sequence[int]): Input feature index. Default: -1\n input_transform (str|None): Transformation type of input features.\n Options: 'resize_concat', 'multiple_select', None.\n 'resize_concat': Multiple feature maps will be resize to the\n same size as first one and than concat together.\n Usually used in FCN head of HRNet.\n 'multiple_select': Multiple feature maps will be bundle into\n a list and passed into decode head.\n None: Only one select feature map is allowed.\n Default: None.\n loss_decode (dict): Config of decode loss.\n Default: dict(type='CrossEntropyLoss').\n ignore_index (int): The label index to be ignored. Default: 255\n sampler (dict|None): The config of segmentation map sampler.\n Default: None.\n align_corners (bool): align_corners argument of F.interpolate.\n Default: False.\n "
def __init__(self, in_channels, channels, *, num_classes, dropout_ratio=0.1, conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), in_index=(- 1), input_transform=None, loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), ignore_index=255, sampler=None, align_corners=False):
super(BaseDecodeHead, self).__init__()
self._init_inputs(in_channels, in_index, input_transform)
self.channels = channels
self.num_classes = num_classes
self.dropout_ratio = dropout_ratio
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.in_index = in_index
self.loss_decode = build_loss(loss_decode)
self.ignore_index = ignore_index
self.align_corners = align_corners
if (sampler is not None):
self.sampler = build_pixel_sampler(sampler, context=self)
else:
self.sampler = None
if (dropout_ratio > 0):
self.dropout = nn.Dropout2d(dropout_ratio)
else:
self.dropout = None
self.fp16_enabled = False
def extra_repr(self):
'Extra repr.'
s = f'input_transform={self.input_transform}, ignore_index={self.ignore_index}, align_corners={self.align_corners}'
return s
def _init_inputs(self, in_channels, in_index, input_transform):
"Check and initialize input transforms.\n\n The in_channels, in_index and input_transform must match.\n Specifically, when input_transform is None, only single feature map\n will be selected. So in_channels and in_index must be of type int.\n When input_transform\n\n Args:\n in_channels (int|Sequence[int]): Input channels.\n in_index (int|Sequence[int]): Input feature index.\n input_transform (str|None): Transformation type of input features.\n Options: 'resize_concat', 'multiple_select', None.\n 'resize_concat': Multiple feature maps will be resize to the\n same size as first one and than concat together.\n Usually used in FCN head of HRNet.\n 'multiple_select': Multiple feature maps will be bundle into\n a list and passed into decode head.\n None: Only one select feature map is allowed.\n "
if (input_transform is not None):
assert (input_transform in ['resize_concat', 'multiple_select'])
self.input_transform = input_transform
self.in_index = in_index
if (input_transform is not None):
assert isinstance(in_channels, (list, tuple))
assert isinstance(in_index, (list, tuple))
assert (len(in_channels) == len(in_index))
if (input_transform == 'resize_concat'):
self.in_channels = sum(in_channels)
else:
self.in_channels = in_channels
else:
assert isinstance(in_channels, int)
assert isinstance(in_index, int)
self.in_channels = in_channels
def init_weights(self):
'Initialize weights of classification layer.'
normal_init(self.conv_seg, mean=0, std=0.01)
def _transform_inputs(self, inputs):
'Transform inputs for decoder.\n\n Args:\n inputs (list[Tensor]): List of multi-level img features.\n\n Returns:\n Tensor: The transformed inputs\n '
if (self.input_transform == 'resize_concat'):
inputs = [inputs[i] for i in self.in_index]
upsampled_inputs = [resize(input=x, size=inputs[0].shape[2:], mode='bilinear', align_corners=self.align_corners) for x in inputs]
inputs = torch.cat(upsampled_inputs, dim=1)
elif (self.input_transform == 'multiple_select'):
inputs = [inputs[i] for i in self.in_index]
else:
inputs = inputs[self.in_index]
return inputs
@auto_fp16()
@abstractmethod
def forward(self, inputs):
'Placeholder of forward function.'
pass
def forward_train(self, inputs, img_metas, flow_x, flow_y, train_cfg):
"Forward function for training.\n Args:\n inputs (list[Tensor]): List of multi-level img features.\n img_metas (list[dict]): List of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmseg/datasets/pipelines/formatting.py:Collect`.\n gt_semantic_seg (Tensor): Semantic segmentation masks\n used if the architecture supports semantic segmentation task.\n train_cfg (dict): The training config.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n "
seg_logits = self.forward(inputs)
(losses, seg_logits) = self.losses(seg_logits, (flow_x, flow_y))
return (losses, seg_logits)
def forward_test(self, inputs, img_metas, test_cfg):
"Forward function for testing.\n\n Args:\n inputs (list[Tensor]): List of multi-level img features.\n img_metas (list[dict]): List of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmseg/datasets/pipelines/formatting.py:Collect`.\n test_cfg (dict): The testing config.\n\n Returns:\n Tensor: Output segmentation map.\n "
return self.forward(inputs)
@force_fp32(apply_to=('seg_logit',))
def losses(self, seg_logit, seg_label):
'Compute segmentation loss.'
loss = dict()
_seg_label_size = (seg_label[0].shape[1:] if isinstance(seg_label, tuple) else seg_label.shape[2:])
seg_logit = resize(input=seg_logit, size=_seg_label_size, mode='bilinear', align_corners=self.align_corners)
if (self.sampler is not None):
seg_weight = self.sampler.sample(seg_logit, seg_label)
else:
seg_weight = None
loss['loss_seg'] = self.loss_decode(seg_logit, seg_label, weight=seg_weight, ignore_index=self.ignore_index)
return (loss, seg_logit)
|
def get_loss(cfg):
if (cfg.type == 'unflow'):
loss = unFlowLoss(cfg)
else:
raise NotImplementedError(cfg.type)
return loss
|
class CorrelationFunction(Function):
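    # NOTE: this relies on the legacy autograd.Function API (stateful
    # instances with non-static forward/backward); recent PyTorch versions
    # require staticmethod forward/backward with a ctx argument, so this
    # class is only expected to run on older releases with the
    # correlation_cuda extension built.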
def __init__(self, pad_size=3, kernel_size=3, max_displacement=20, stride1=1, stride2=2, corr_multiply=1):
super(CorrelationFunction, self).__init__()
self.pad_size = pad_size
self.kernel_size = kernel_size
self.max_displacement = max_displacement
self.stride1 = stride1
self.stride2 = stride2
self.corr_multiply = corr_multiply
def forward(self, input1, input2):
self.save_for_backward(input1, input2)
with torch.cuda.device_of(input1):
rbot1 = input1.new()
rbot2 = input2.new()
output = input1.new()
correlation_cuda.forward(input1, input2, rbot1, rbot2, output, self.pad_size, self.kernel_size, self.max_displacement, self.stride1, self.stride2, self.corr_multiply)
return output
def backward(self, grad_output):
(input1, input2) = self.saved_tensors
with torch.cuda.device_of(input1):
rbot1 = input1.new()
rbot2 = input2.new()
grad_input1 = input1.new()
grad_input2 = input2.new()
correlation_cuda.backward(input1, input2, rbot1, rbot2, grad_output, grad_input1, grad_input2, self.pad_size, self.kernel_size, self.max_displacement, self.stride1, self.stride2, self.corr_multiply)
return (grad_input1, grad_input2)
|
class Correlation(Module):
def __init__(self, pad_size=0, kernel_size=0, max_displacement=0, stride1=1, stride2=2, corr_multiply=1):
super(Correlation, self).__init__()
self.pad_size = pad_size
self.kernel_size = kernel_size
self.max_displacement = max_displacement
self.stride1 = stride1
self.stride2 = stride2
self.corr_multiply = corr_multiply
def forward(self, input1, input2):
result = CorrelationFunction(self.pad_size, self.kernel_size, self.max_displacement, self.stride1, self.stride2, self.corr_multiply)(input1, input2)
return result
|
def get_model(cfg):
if (cfg.type == 'pwclite'):
model = PWCLite(cfg)
else:
raise NotImplementedError(cfg.type)
return model
|
def update_dict(orig_dict, new_dict):
for (key, val) in new_dict.items():
        if isinstance(val, collections.abc.Mapping):
tmp = update_dict(orig_dict.get(key, {}), val)
orig_dict[key] = tmp
else:
orig_dict[key] = val
return orig_dict
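# Illustrative sketch (not part of the original code): nested mappings are
# merged key-by-key rather than replaced wholesale.
def _demo_update_dict():
    base = {'model': {'lr': 0.1, 'depth': 50}, 'seed': 0}
    merged = update_dict(base, {'model': {'lr': 0.01}})
    assert merged['model'] == {'lr': 0.01, 'depth': 50}
    assert merged['seed'] == 0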
|
class AverageMeter(object):
'Computes and stores the average and current value'
def __init__(self, i=1, precision=3, names=None):
self.meters = i
self.precision = precision
self.reset(self.meters)
self.names = names
if (names is not None):
assert (self.meters == len(self.names))
else:
self.names = ([''] * self.meters)
def reset(self, i):
self.val = ([0] * i)
self.avg = ([0] * i)
self.sum = ([0] * i)
self.count = ([0] * i)
def update(self, val, n=1):
if (not isinstance(val, list)):
val = [val]
if (not isinstance(n, list)):
n = ([n] * self.meters)
assert ((len(val) == self.meters) and (len(n) == self.meters))
for i in range(self.meters):
self.count[i] += n[i]
for (i, v) in enumerate(val):
self.val[i] = v
self.sum[i] += (v * n[i])
self.avg[i] = (self.sum[i] / self.count[i])
def __repr__(self):
val = ' '.join(['{} {:.{}f}'.format(n, v, self.precision) for (n, v) in zip(self.names, self.val)])
avg = ' '.join(['{} {:.{}f}'.format(n, a, self.precision) for (n, a) in zip(self.names, self.avg)])
return '{} ({})'.format(val, avg)
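# Illustrative sketch (not part of the original code): tracking two named
# metrics; repr shows "current values (running averages)".
def _demo_average_meter():
    meter = AverageMeter(i=2, names=['loss', 'acc'])
    meter.update([0.5, 0.8])
    meter.update([0.3, 0.9])
    assert abs(meter.avg[0] - 0.4) < 1e-6
    print(meter)  # e.g. "loss 0.300 acc 0.900 (loss 0.400 acc 0.850)"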
|
def init_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
|
def weight_parameters(module):
return [param for (name, param) in module.named_parameters() if ('weight' in name)]
|
def bias_parameters(module):
return [param for (name, param) in module.named_parameters() if ('bias' in name)]
|
def load_checkpoint(model_path):
weights = torch.load(model_path)
epoch = None
if ('epoch' in weights):
epoch = weights.pop('epoch')
if ('state_dict' in weights):
state_dict = weights['state_dict']
else:
state_dict = weights
return (epoch, state_dict)
|
def save_checkpoint(save_path, states, file_prefixes, is_best, filename='ckpt.pth.tar'):
def run_one_sample(save_path, state, prefix, is_best, filename):
torch.save(state, (save_path / '{}_{}'.format(prefix, filename)))
if is_best:
shutil.copyfile((save_path / '{}_{}'.format(prefix, filename)), (save_path / '{}_model_best.pth.tar'.format(prefix)))
if (not isinstance(file_prefixes, str)):
for (prefix, state) in zip(file_prefixes, states):
run_one_sample(save_path, state, prefix, is_best, filename)
else:
run_one_sample(save_path, states, file_prefixes, is_best, filename)
|
def restore_model(model, pretrained_file):
(epoch, weights) = load_checkpoint(pretrained_file)
model_keys = set(model.state_dict().keys())
weight_keys = set(weights.keys())
weights_not_in_model = sorted(list((weight_keys - model_keys)))
model_not_in_weights = sorted(list((model_keys - weight_keys)))
if len(model_not_in_weights):
print('Warning: There are weights in model but not in pre-trained.')
for key in model_not_in_weights:
print(key)
weights[key] = model.state_dict()[key]
if len(weights_not_in_model):
print('Warning: There are pre-trained weights not in model.')
for key in weights_not_in_model:
print(key)
from collections import OrderedDict
new_weights = OrderedDict()
for key in model_keys:
new_weights[key] = weights[key]
weights = new_weights
model.load_state_dict(weights)
return model
|
class AdamW(Optimizer):
    'Implements AdamW algorithm.\n\n It has been proposed in `Fixing Weight Decay Regularization in Adam`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n\n .. _Fixing Weight Decay Regularization in Adam:\n https://arxiv.org/abs/1711.05101\n '
def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(AdamW, self).__init__(params, defaults)
def step(self, closure=None):
'Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('AdamW does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
(exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
(beta1, beta2) = group['betas']
state['step'] += 1
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = (1 - (beta1 ** state['step']))
bias_correction2 = (1 - (beta2 ** state['step']))
step_size = ((group['lr'] * math.sqrt(bias_correction2)) / bias_correction1)
                p.data.addcdiv_(exp_avg, denom, value=(- step_size))
                if (group['weight_decay'] != 0):
                    p.data.add_(p.data, alpha=(- group['weight_decay']))
return loss
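# Illustrative sketch (not part of the original code): one optimization step
# on a toy linear model; note that this variant applies the decoupled weight
# decay directly to the parameters, without scaling it by the learning rate.
def _demo_adamw():
    model = nn.Linear(4, 2)
    opt = AdamW(model.parameters(), lr=0.001, weight_decay=0.01)
    loss = model(torch.rand(8, 4)).pow(2).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()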
|
def accuracy(pred, target, topk=1, thresh=None):
    'Calculate accuracy according to the prediction and target.\n\n Args:\n pred (torch.Tensor): The model prediction, shape (N, num_class, ...)\n target (torch.Tensor): The target of each prediction, shape (N, ...)\n topk (int | tuple[int], optional): If the predictions in ``topk``\n match the target, the predictions will be regarded as\n correct ones. Defaults to 1.\n thresh (float, optional): If not None, predictions with scores under\n this threshold are considered incorrect. Default to None.\n\n Returns:\n float | tuple[float]: If the input ``topk`` is a single integer,\n the function will return a single float as accuracy. If\n ``topk`` is a tuple containing multiple integers, the\n function will return a tuple containing accuracies of\n each ``topk`` number.\n '
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk,)
return_single = True
else:
return_single = False
maxk = max(topk)
if (pred.size(0) == 0):
accu = [pred.new_tensor(0.0) for i in range(len(topk))]
return (accu[0] if return_single else accu)
assert (pred.ndim == (target.ndim + 1))
assert (pred.size(0) == target.size(0))
assert (maxk <= pred.size(1)), f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
(pred_value, pred_label) = pred.topk(maxk, dim=1)
pred_label = pred_label.transpose(0, 1)
correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label))
if (thresh is not None):
correct = (correct & (pred_value > thresh).t())
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True)
res.append(correct_k.mul_((100.0 / target.numel())))
return (res[0] if return_single else res)
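# Illustrative sketch (not part of the original code): on a 3-class toy batch,
# sample 0 is correct at top-1 and sample 1 only within top-2, giving 50% and
# 100% respectively.
def _demo_accuracy():
    pred = torch.tensor([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]])
    target = torch.tensor([1, 1])
    top1, top2 = accuracy(pred, target, topk=(1, 2))
    assert top1.item() == 50.0 and top2.item() == 100.0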
|
class Accuracy(nn.Module):
'Accuracy calculation module.'
def __init__(self, topk=(1,), thresh=None):
'Module to calculate the accuracy.\n\n Args:\n topk (tuple, optional): The criterion used to calculate the\n accuracy. Defaults to (1,).\n thresh (float, optional): If not None, predictions with scores\n under this threshold are considered incorrect. Default to None.\n '
super().__init__()
self.topk = topk
self.thresh = thresh
def forward(self, pred, target):
'Forward function to calculate accuracy.\n\n Args:\n pred (torch.Tensor): Prediction of models.\n target (torch.Tensor): Target for each prediction.\n\n Returns:\n tuple[float]: The accuracies under different topk criterions.\n '
return accuracy(pred, target, self.topk, self.thresh)
|
def reduce_loss(loss, reduction):
'Reduce loss as specified.\n\n Args:\n loss (Tensor): Elementwise loss tensor.\n reduction (str): Options are "none", "mean" and "sum".\n\n Return:\n Tensor: Reduced loss tensor.\n '
reduction_enum = F._Reduction.get_enum(reduction)
if (reduction_enum == 0):
return loss
elif (reduction_enum == 1):
return loss.mean()
elif (reduction_enum == 2):
return loss.sum()
|
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    'Apply element-wise weight and reduce loss.\n\n Args:\n loss (Tensor): Element-wise loss.\n weight (Tensor): Element-wise weights.\n reduction (str): Same as built-in losses of PyTorch.\n avg_factor (float): Average factor when computing the mean of losses.\n\n Returns:\n Tensor: Processed loss values.\n '
if (weight is not None):
assert (weight.dim() == loss.dim())
if (weight.dim() > 1):
assert ((weight.size(1) == 1) or (weight.size(1) == loss.size(1)))
loss = (loss * weight)
if (avg_factor is None):
loss = reduce_loss(loss, reduction)
elif (reduction == 'mean'):
loss = (loss.sum() / avg_factor)
elif (reduction != 'none'):
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
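# Illustrative sketch (not part of the original code): element-wise weights
# mask out the second element, and avg_factor=2 replaces the tensor size as
# the mean's denominator: (1*1 + 2*0 + 3*1) / 2 == 2.
def _demo_weight_reduce_loss():
    loss = torch.tensor([1.0, 2.0, 3.0])
    weight = torch.tensor([1.0, 0.0, 1.0])
    assert weight_reduce_loss(loss, weight, 'mean', avg_factor=2).item() == 2.0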
|
def weighted_loss(loss_func):
"Create a weighted version of a given loss function.\n\n To use this decorator, the loss function must have the signature like\n `loss_func(pred, target, **kwargs)`. The function only needs to compute\n element-wise loss without any reduction. This decorator will add weight\n and reduction arguments to the function. The decorated function will have\n the signature like `loss_func(pred, target, weight=None, reduction='mean',\n avg_factor=None, **kwargs)`.\n\n :Example:\n\n >>> import torch\n >>> @weighted_loss\n >>> def l1_loss(pred, target):\n >>> return (pred - target).abs()\n\n >>> pred = torch.Tensor([0, 2, 3])\n >>> target = torch.Tensor([1, 1, 1])\n >>> weight = torch.Tensor([1, 0, 1])\n\n >>> l1_loss(pred, target)\n tensor(1.3333)\n >>> l1_loss(pred, target, weight)\n tensor(1.)\n >>> l1_loss(pred, target, reduction='none')\n tensor([1., 1., 2.])\n >>> l1_loss(pred, target, weight, avg_factor=2)\n tensor(1.5000)\n "
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
|
@NECKS.register_module()
class FPN(nn.Module):
"Feature Pyramid Network.\n\n This is an implementation of - Feature Pyramid Networks for Object\n Detection (https://arxiv.org/abs/1612.03144)\n\n Args:\n in_channels (List[int]): Number of input channels per scale.\n out_channels (int): Number of output channels (used at each scale)\n num_outs (int): Number of output scales.\n start_level (int): Index of the start input backbone level used to\n build the feature pyramid. Default: 0.\n end_level (int): Index of the end input backbone level (exclusive) to\n build the feature pyramid. Default: -1, which means the last level.\n add_extra_convs (bool | str): If bool, it decides whether to add conv\n layers on top of the original feature maps. Default to False.\n If True, its actual mode is specified by `extra_convs_on_inputs`.\n If str, it specifies the source feature map of the extra convs.\n Only the following options are allowed\n\n - 'on_input': Last feat map of neck inputs (i.e. backbone feature).\n - 'on_lateral': Last feature map after lateral convs.\n - 'on_output': The last output feature map after fpn convs.\n extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs\n on the original feature from the backbone. If True,\n it is equivalent to `add_extra_convs='on_input'`. If False, it is\n equivalent to set `add_extra_convs='on_output'`. Default to True.\n relu_before_extra_convs (bool): Whether to apply relu before the extra\n conv. Default: False.\n no_norm_on_lateral (bool): Whether to apply norm on lateral.\n Default: False.\n conv_cfg (dict): Config dict for convolution layer. Default: None.\n norm_cfg (dict): Config dict for normalization layer. Default: None.\n act_cfg (str): Config dict for activation layer in ConvModule.\n Default: None.\n upsample_cfg (dict): Config dict for interpolate layer.\n Default: `dict(mode='nearest')`\n\n Example:\n >>> import torch\n >>> in_channels = [2, 3, 5, 7]\n >>> scales = [340, 170, 84, 43]\n >>> inputs = [torch.rand(1, c, s, s)\n ... for c, s in zip(in_channels, scales)]\n >>> self = FPN(in_channels, 11, len(in_channels)).eval()\n >>> outputs = self.forward(inputs)\n >>> for i in range(len(outputs)):\n ... print(f'outputs[{i}].shape = {outputs[i].shape}')\n outputs[0].shape = torch.Size([1, 11, 340, 340])\n outputs[1].shape = torch.Size([1, 11, 170, 170])\n outputs[2].shape = torch.Size([1, 11, 84, 84])\n outputs[3].shape = torch.Size([1, 11, 43, 43])\n "
    def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=(- 1), add_extra_convs=False, extra_convs_on_inputs=True, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, act_cfg=None, upsample_cfg=dict(mode='nearest')):
super(FPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.relu_before_extra_convs = relu_before_extra_convs
self.no_norm_on_lateral = no_norm_on_lateral
self.fp16_enabled = False
self.upsample_cfg = upsample_cfg.copy()
if (end_level == (- 1)):
self.backbone_end_level = self.num_ins
assert (num_outs >= (self.num_ins - start_level))
else:
self.backbone_end_level = end_level
assert (end_level <= len(in_channels))
assert (num_outs == (end_level - start_level))
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
assert isinstance(add_extra_convs, (str, bool))
if isinstance(add_extra_convs, str):
assert (add_extra_convs in ('on_input', 'on_lateral', 'on_output'))
elif add_extra_convs:
if extra_convs_on_inputs:
self.add_extra_convs = 'on_input'
else:
self.add_extra_convs = 'on_output'
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=(norm_cfg if (not self.no_norm_on_lateral) else None), act_cfg=act_cfg, inplace=False)
fpn_conv = ConvModule(out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
if (self.add_extra_convs and (extra_levels >= 1)):
for i in range(extra_levels):
if ((i == 0) and (self.add_extra_convs == 'on_input')):
in_channels = self.in_channels[(self.backbone_end_level - 1)]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False)
self.fpn_convs.append(extra_fpn_conv)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
def forward(self, inputs):
assert (len(inputs) == len(self.in_channels))
laterals = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
used_backbone_levels = len(laterals)
for i in range((used_backbone_levels - 1), 0, (- 1)):
if ('scale_factor' in self.upsample_cfg):
laterals[(i - 1)] += F.interpolate(laterals[i], **self.upsample_cfg)
else:
prev_shape = laterals[(i - 1)].shape[2:]
laterals[(i - 1)] += F.interpolate(laterals[i], size=prev_shape, **self.upsample_cfg)
outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)]
if (self.num_outs > len(outs)):
if (not self.add_extra_convs):
for i in range((self.num_outs - used_backbone_levels)):
outs.append(F.max_pool2d(outs[(- 1)], 1, stride=2))
else:
if (self.add_extra_convs == 'on_input'):
extra_source = inputs[(self.backbone_end_level - 1)]
elif (self.add_extra_convs == 'on_lateral'):
extra_source = laterals[(- 1)]
elif (self.add_extra_convs == 'on_output'):
extra_source = outs[(- 1)]
else:
raise NotImplementedError
outs.append(self.fpn_convs[used_backbone_levels](extra_source))
for i in range((used_backbone_levels + 1), self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[(- 1)])))
else:
outs.append(self.fpn_convs[i](outs[(- 1)]))
return tuple(outs)
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    """Make divisible function.

    This function rounds the channel number to the nearest value that can
    be divisible by the divisor.

    Args:
        value (int): The original channel number.
        divisor (int): The divisor to fully divide the channel number.
        min_value (int, optional): The minimum value of the output channel.
            Default: None, means that the minimum value equal to the divisor.
        min_ratio (float, optional): The minimum ratio of the rounded channel
            number to the original channel number. Default: 0.9.

    Returns:
        int: The modified output channel number.
    """
if (min_value is None):
min_value = divisor
new_value = max(min_value, ((int((value + (divisor / 2))) // divisor) * divisor))
if (new_value < (min_ratio * value)):
new_value += divisor
return new_value
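# Hedged usage sketch (illustrative, not part of the original source):
# channel counts are rounded to the nearest multiple of the divisor, then
# bumped up by one divisor whenever the rounded value would drop below
# min_ratio * value.
assert make_divisible(32, 8) == 32   # already divisible
assert make_divisible(33, 8) == 32   # rounded to the nearest multiple of 8
assert make_divisible(10, 8) == 16   # 8 < 0.9 * 10, so bump up by the divisor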
class ResLayer(nn.Sequential):
"ResLayer to build ResNet style backbone.\n\n Args:\n block (nn.Module): block used to build ResLayer.\n inplanes (int): inplanes of block.\n planes (int): planes of block.\n num_blocks (int): number of blocks.\n stride (int): stride of the first block. Default: 1\n avg_down (bool): Use AvgPool instead of stride conv when\n downsampling in the bottleneck. Default: False\n conv_cfg (dict): dictionary to construct and config conv layer.\n Default: None\n norm_cfg (dict): dictionary to construct and config norm layer.\n Default: dict(type='BN')\n multi_grid (int | None): Multi grid dilation rates of last\n stage. Default: None\n contract_dilation (bool): Whether contract first dilation of each layer\n Default: False\n "
def __init__(self, block, inplanes, planes, num_blocks, stride=1, dilation=1, avg_down=False, conv_cfg=None, norm_cfg=dict(type='BN'), multi_grid=None, contract_dilation=False, **kwargs):
self.block = block
downsample = None
if ((stride != 1) or (inplanes != (planes * block.expansion))):
downsample = []
conv_stride = stride
if avg_down:
conv_stride = 1
downsample.append(nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False))
downsample.extend([build_conv_layer(conv_cfg, inplanes, (planes * block.expansion), kernel_size=1, stride=conv_stride, bias=False), build_norm_layer(norm_cfg, (planes * block.expansion))[1]])
downsample = nn.Sequential(*downsample)
layers = []
if (multi_grid is None):
if ((dilation > 1) and contract_dilation):
first_dilation = (dilation // 2)
else:
first_dilation = dilation
else:
first_dilation = multi_grid[0]
layers.append(block(inplanes=inplanes, planes=planes, stride=stride, dilation=first_dilation, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs))
inplanes = (planes * block.expansion)
for i in range(1, num_blocks):
layers.append(block(inplanes=inplanes, planes=planes, stride=1, dilation=(dilation if (multi_grid is None) else multi_grid[i]), conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs))
super(ResLayer, self).__init__(*layers)
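# Hedged usage sketch (hypothetical helper, not part of the original source):
# builds a tiny ResNet-style stage. Assumes mmseg's BasicBlock (whose
# `expansion` attribute is 1) is importable from the backbone module.
def _demo_res_layer():
    from mmseg.models.backbones.resnet import BasicBlock
    layer = ResLayer(BasicBlock, inplanes=64, planes=64, num_blocks=2)
    out = layer(torch.rand(1, 64, 56, 56))
    # stride defaults to 1 and inplanes == planes * expansion, so no
    # downsample branch is created and the resolution is preserved
    assert out.shape == (1, 64, 56, 56)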
class SelfAttentionBlock(nn.Module):
    """General self-attention block/non-local block.

    Please refer to https://arxiv.org/abs/1706.03762 for details about key,
    query and value.

    Args:
        key_in_channels (int): Input channels of key feature.
        query_in_channels (int): Input channels of query feature.
        channels (int): Output channels of key/query transform.
        out_channels (int): Output channels.
        share_key_query (bool): Whether share projection weight between key
            and query projection.
        query_downsample (nn.Module): Query downsample module.
        key_downsample (nn.Module): Key downsample module.
        key_query_num_convs (int): Number of convs for key/query projection.
        value_out_num_convs (int): Number of convs for value/out projection.
        key_query_norm (bool): Whether to use norm in key/query projection.
        value_out_norm (bool): Whether to use norm in value/out projection.
        matmul_norm (bool): Whether normalize attention map with sqrt of
            channels.
        with_out (bool): Whether use out projection.
        conv_cfg (dict|None): Config of conv layers.
        norm_cfg (dict|None): Config of norm layers.
        act_cfg (dict|None): Config of activation layers.
    """
def __init__(self, key_in_channels, query_in_channels, channels, out_channels, share_key_query, query_downsample, key_downsample, key_query_num_convs, value_out_num_convs, key_query_norm, value_out_norm, matmul_norm, with_out, conv_cfg, norm_cfg, act_cfg):
super(SelfAttentionBlock, self).__init__()
if share_key_query:
assert (key_in_channels == query_in_channels)
self.key_in_channels = key_in_channels
self.query_in_channels = query_in_channels
self.out_channels = out_channels
self.channels = channels
self.share_key_query = share_key_query
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.key_project = self.build_project(key_in_channels, channels, num_convs=key_query_num_convs, use_conv_module=key_query_norm, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
if share_key_query:
self.query_project = self.key_project
else:
self.query_project = self.build_project(query_in_channels, channels, num_convs=key_query_num_convs, use_conv_module=key_query_norm, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
self.value_project = self.build_project(key_in_channels, (channels if with_out else out_channels), num_convs=value_out_num_convs, use_conv_module=value_out_norm, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
if with_out:
self.out_project = self.build_project(channels, out_channels, num_convs=value_out_num_convs, use_conv_module=value_out_norm, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
else:
self.out_project = None
self.query_downsample = query_downsample
self.key_downsample = key_downsample
self.matmul_norm = matmul_norm
self.init_weights()
def init_weights(self):
'Initialize weight of later layer.'
if (self.out_project is not None):
if (not isinstance(self.out_project, ConvModule)):
constant_init(self.out_project, 0)
def build_project(self, in_channels, channels, num_convs, use_conv_module, conv_cfg, norm_cfg, act_cfg):
'Build projection layer for key/query/value/out.'
if use_conv_module:
convs = [ConvModule(in_channels, channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)]
for _ in range((num_convs - 1)):
convs.append(ConvModule(channels, channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
else:
convs = [nn.Conv2d(in_channels, channels, 1)]
for _ in range((num_convs - 1)):
convs.append(nn.Conv2d(channels, channels, 1))
if (len(convs) > 1):
convs = nn.Sequential(*convs)
else:
convs = convs[0]
return convs
def forward(self, query_feats, key_feats):
'Forward function.'
batch_size = query_feats.size(0)
query = self.query_project(query_feats)
if (self.query_downsample is not None):
query = self.query_downsample(query)
query = query.reshape(*query.shape[:2], (- 1))
query = query.permute(0, 2, 1).contiguous()
key = self.key_project(key_feats)
value = self.value_project(key_feats)
if (self.key_downsample is not None):
key = self.key_downsample(key)
value = self.key_downsample(value)
key = key.reshape(*key.shape[:2], (- 1))
value = value.reshape(*value.shape[:2], (- 1))
value = value.permute(0, 2, 1).contiguous()
sim_map = torch.matmul(query, key)
if self.matmul_norm:
sim_map = ((self.channels ** (- 0.5)) * sim_map)
sim_map = F.softmax(sim_map, dim=(- 1))
context = torch.matmul(sim_map, value)
context = context.permute(0, 2, 1).contiguous()
context = context.reshape(batch_size, (- 1), *query_feats.shape[2:])
if (self.out_project is not None):
context = self.out_project(context)
return context
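# Hedged usage sketch (hypothetical helper, not part of the original source):
# plain self-attention where query and key come from the same feature map.
# With with_out=True, the value projection maps to `channels` and
# `out_project` restores `out_channels`.
def _demo_self_attention_block():
    attn = SelfAttentionBlock(
        key_in_channels=64, query_in_channels=64, channels=32,
        out_channels=64, share_key_query=False, query_downsample=None,
        key_downsample=None, key_query_num_convs=2, value_out_num_convs=1,
        key_query_norm=False, value_out_norm=False, matmul_norm=True,
        with_out=True, conv_cfg=None, norm_cfg=None, act_cfg=None)
    feats = torch.rand(2, 64, 16, 16)
    context = attn(feats, feats)
    assert context.shape == (2, 64, 16, 16)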
class Encoding(nn.Module):
    """Encoding Layer: a learnable residual encoder.

    Input is of shape (batch_size, channels, height, width).
    Output is of shape (batch_size, num_codes, channels).

    Args:
        channels: dimension of the features or feature channels
        num_codes: number of code words
    """
def __init__(self, channels, num_codes):
super(Encoding, self).__init__()
(self.channels, self.num_codes) = (channels, num_codes)
std = (1.0 / ((num_codes * channels) ** 0.5))
self.codewords = nn.Parameter(torch.empty(num_codes, channels, dtype=torch.float).uniform_((- std), std), requires_grad=True)
self.scale = nn.Parameter(torch.empty(num_codes, dtype=torch.float).uniform_((- 1), 0), requires_grad=True)
@staticmethod
def scaled_l2(x, codewords, scale):
(num_codes, channels) = codewords.size()
batch_size = x.size(0)
reshaped_scale = scale.view((1, 1, num_codes))
expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1), num_codes, channels))
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
scaled_l2_norm = (reshaped_scale * (expanded_x - reshaped_codewords).pow(2).sum(dim=3))
return scaled_l2_norm
@staticmethod
    def aggregate(assignment_weights, x, codewords):
        (num_codes, channels) = codewords.size()
        reshaped_codewords = codewords.view((1, 1, num_codes, channels))
        batch_size = x.size(0)
        expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1), num_codes, channels))
        encoded_feat = (assignment_weights.unsqueeze(3) * (expanded_x - reshaped_codewords)).sum(dim=1)
return encoded_feat
def forward(self, x):
assert ((x.dim() == 4) and (x.size(1) == self.channels))
batch_size = x.size(0)
x = x.view(batch_size, self.channels, (- 1)).transpose(1, 2).contiguous()
        assignment_weights = F.softmax(self.scaled_l2(x, self.codewords, self.scale), dim=2)
        encoded_feat = self.aggregate(assignment_weights, x, self.codewords)
return encoded_feat
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})'
return repr_str
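# Hedged usage sketch (hypothetical helper, not part of the original source):
# the layer aggregates (N, C, H, W) features into (N, num_codes, C) residual
# encodings, one row per learned codeword.
def _demo_encoding():
    enc = Encoding(channels=32, num_codes=16)
    out = enc(torch.rand(2, 32, 8, 8))
    assert out.shape == (2, 16, 32)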
class DepthwiseSeparableConvModule(nn.Module):
"Depthwise separable convolution module.\n\n See https://arxiv.org/pdf/1704.04861.pdf for details.\n\n This module can replace a ConvModule with the conv block replaced by two\n conv block: depthwise conv block and pointwise conv block. The depthwise\n conv block contains depthwise-conv/norm/activation layers. The pointwise\n conv block contains pointwise-conv/norm/activation layers. It should be\n noted that there will be norm/activation layer in the depthwise conv block\n if `norm_cfg` and `act_cfg` are specified.\n\n Args:\n in_channels (int): Same as nn.Conv2d.\n out_channels (int): Same as nn.Conv2d.\n kernel_size (int or tuple[int]): Same as nn.Conv2d.\n stride (int or tuple[int]): Same as nn.Conv2d. Default: 1.\n padding (int or tuple[int]): Same as nn.Conv2d. Default: 0.\n dilation (int or tuple[int]): Same as nn.Conv2d. Default: 1.\n norm_cfg (dict): Default norm config for both depthwise ConvModule and\n pointwise ConvModule. Default: None.\n act_cfg (dict): Default activation config for both depthwise ConvModule\n and pointwise ConvModule. Default: dict(type='ReLU').\n dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is\n 'default', it will be the same as `norm_cfg`. Default: 'default'.\n dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is\n 'default', it will be the same as `act_cfg`. Default: 'default'.\n pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is\n 'default', it will be the same as `norm_cfg`. Default: 'default'.\n pw_act_cfg (dict): Activation config of pointwise ConvModule. If it is\n 'default', it will be the same as `act_cfg`. Default: 'default'.\n kwargs (optional): Other shared arguments for depthwise and pointwise\n ConvModule. See ConvModule for ref.\n "
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, norm_cfg=None, act_cfg=dict(type='ReLU'), dw_norm_cfg='default', dw_act_cfg='default', pw_norm_cfg='default', pw_act_cfg='default', **kwargs):
super(DepthwiseSeparableConvModule, self).__init__()
assert ('groups' not in kwargs), 'groups should not be specified'
dw_norm_cfg = (dw_norm_cfg if (dw_norm_cfg != 'default') else norm_cfg)
dw_act_cfg = (dw_act_cfg if (dw_act_cfg != 'default') else act_cfg)
pw_norm_cfg = (pw_norm_cfg if (pw_norm_cfg != 'default') else norm_cfg)
pw_act_cfg = (pw_act_cfg if (pw_act_cfg != 'default') else act_cfg)
self.depthwise_conv = ConvModule(in_channels, in_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels, norm_cfg=dw_norm_cfg, act_cfg=dw_act_cfg, **kwargs)
self.pointwise_conv = ConvModule(in_channels, out_channels, 1, norm_cfg=pw_norm_cfg, act_cfg=pw_act_cfg, **kwargs)
def forward(self, x):
x = self.depthwise_conv(x)
x = self.pointwise_conv(x)
return x
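# Hedged usage sketch (hypothetical helper, not part of the original source):
# a depthwise 3x3 conv followed by a pointwise 1x1 conv stands in for a
# single dense 3x3 ConvModule at a fraction of the parameter count.
def _demo_depthwise_separable_conv():
    conv = DepthwiseSeparableConvModule(32, 64, 3, padding=1)
    out = conv(torch.rand(1, 32, 16, 16))
    assert out.shape == (1, 64, 16, 16)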
def resize(input, size=None, scale_factor=None, mode='nearest', align_corners=None, warning=True):
if warning:
if ((size is not None) and align_corners):
(input_h, input_w) = tuple((int(x) for x in input.shape[2:]))
(output_h, output_w) = tuple((int(x) for x in size))
            if ((output_h > input_h) or (output_w > input_w)):
if (((output_h > 1) and (output_w > 1) and (input_h > 1) and (input_w > 1)) and ((output_h - 1) % (input_h - 1)) and ((output_w - 1) % (input_w - 1))):
                    warnings.warn(f'When align_corners={align_corners}, the output would be more aligned if input size {(input_h, input_w)} is `x+1` and out size {(output_h, output_w)} is `nx+1`')
if isinstance(size, torch.Size):
size = tuple((int(x) for x in size))
return F.interpolate(input, size, scale_factor, mode, align_corners)
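# Hedged usage sketch (hypothetical helper, not part of the original source):
# with align_corners=True the warning stays silent when the output size is
# n * (input - 1) + 1, e.g. 31 -> 61, because corner pixels then align exactly.
def _demo_resize():
    x = torch.rand(1, 3, 31, 31)
    out = resize(x, size=(61, 61), mode='bilinear', align_corners=True)
    assert out.shape == (1, 3, 61, 61)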
class Upsample(nn.Module):
def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=None):
super(Upsample, self).__init__()
self.size = size
if isinstance(scale_factor, tuple):
self.scale_factor = tuple((float(factor) for factor in scale_factor))
else:
self.scale_factor = (float(scale_factor) if scale_factor else None)
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
if (not self.size):
size = [int((t * self.scale_factor)) for t in x.shape[(- 2):]]
else:
size = self.size
return resize(x, size, None, self.mode, self.align_corners)
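# Hedged usage sketch (hypothetical helper, not part of the original source):
# a scalar scale_factor is assumed; forward() converts it into an explicit
# output size before delegating to resize().
def _demo_upsample():
    up = Upsample(scale_factor=2, mode='bilinear', align_corners=False)
    out = up(torch.rand(1, 3, 32, 32))
    assert out.shape == (1, 3, 64, 64)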
def collect_env():
'Collect the information of the running environments.'
env_info = {}
env_info['sys.platform'] = sys.platform
env_info['Python'] = sys.version.replace('\n', '')
cuda_available = torch.cuda.is_available()
env_info['CUDA available'] = cuda_available
if cuda_available:
from mmcv.utils.parrots_wrapper import CUDA_HOME
env_info['CUDA_HOME'] = CUDA_HOME
if ((CUDA_HOME is not None) and osp.isdir(CUDA_HOME)):
try:
nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
nvcc = subprocess.check_output('"{}" -V | tail -n1'.format(nvcc), shell=True)
nvcc = nvcc.decode('utf-8').strip()
except subprocess.SubprocessError:
nvcc = 'Not Available'
env_info['NVCC'] = nvcc
devices = defaultdict(list)
for k in range(torch.cuda.device_count()):
devices[torch.cuda.get_device_name(k)].append(str(k))
for (name, devids) in devices.items():
env_info[('GPU ' + ','.join(devids))] = name
try:
gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
gcc = gcc.decode('utf-8').strip()
env_info['GCC'] = gcc
except subprocess.CalledProcessError:
env_info['GCC'] = 'n/a'
env_info['PyTorch'] = torch.__version__
env_info['PyTorch compiling details'] = get_build_config()
env_info['TorchVision'] = torchvision.__version__
env_info['OpenCV'] = cv2.__version__
env_info['MMCV'] = mmcv.__version__
env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}'
try:
from mmcv.ops import get_compiler_version, get_compiling_cuda_version
env_info['MMCV Compiler'] = get_compiler_version()
env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()
except ImportError:
env_info['MMCV Compiler'] = 'n/a'
env_info['MMCV CUDA Compiler'] = 'n/a'
return env_info
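# Hedged usage sketch (hypothetical helper, not part of the original source):
# prints the environment table line by line, mirroring how training scripts
# typically log it at startup.
def _demo_collect_env():
    for name, val in collect_env().items():
        print(f'{name}: {val}')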
class InvertedResidual(nn.Module):
"Inverted residual module.\n\n Args:\n in_channels (int): The input channels of the InvertedResidual block.\n out_channels (int): The output channels of the InvertedResidual block.\n stride (int): Stride of the middle (first) 3x3 convolution.\n expand_ratio (int): adjusts number of channels of the hidden layer\n in InvertedResidual by this amount.\n conv_cfg (dict): Config dict for convolution layer.\n Default: None, which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU6').\n "
def __init__(self, in_channels, out_channels, stride, expand_ratio, dilation=1, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU6')):
super(InvertedResidual, self).__init__()
self.stride = stride
assert (stride in [1, 2])
hidden_dim = int(round((in_channels * expand_ratio)))
self.use_res_connect = ((self.stride == 1) and (in_channels == out_channels))
layers = []
if (expand_ratio != 1):
layers.append(ConvModule(in_channels, hidden_dim, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
layers.extend([ConvModule(hidden_dim, hidden_dim, kernel_size=3, padding=dilation, stride=stride, dilation=dilation, groups=hidden_dim, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg), nn.Conv2d(hidden_dim, out_channels, 1, 1, 0, bias=False), build_norm_layer(norm_cfg, out_channels)[1]])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_res_connect:
return (x + self.conv(x))
else:
return self.conv(x)
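# Hedged usage sketch (hypothetical helper, not part of the original source):
# with stride 1 and matching in/out channels, the block adds a residual
# connection around the expand/depthwise/project bottleneck.
def _demo_inverted_residual():
    block = InvertedResidual(32, 32, stride=1, expand_ratio=6)
    out = block(torch.rand(1, 32, 28, 28))
    assert out.shape == (1, 32, 28, 28)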
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Get the root logger.

    The logger will be initialized if it has not been initialized. By default
    a StreamHandler will be added. If `log_file` is specified, a FileHandler
    will also be added. The name of the root logger is the top-level package
    name, e.g., "mmseg".

    Args:
        log_file (str | None): The log filename. If specified, a FileHandler
            will be added to the root logger.
        log_level (int): The root logger level. Note that only the process of
            rank 0 is affected, while other processes will set the level to
            "Error" and be silent most of the time.

    Returns:
        logging.Logger: The root logger.
    """
logger = get_logger(name='mmseg', log_file=log_file, log_level=log_level)
return logger
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif (x.find('rc') != (- 1)):
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
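# Hedged usage sketch (illustrative, not part of the original source): release
# and release-candidate version strings parse into comparable tuples.
assert parse_version_info('1.3.0') == (1, 3, 0)
assert parse_version_info('1.3.0rc1') == (1, 3, 0, 'rc1')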
def _get_config_directory():
'Find the predefined segmentor config directory.'
try:
repo_dpath = dirname(dirname(__file__))
except NameError:
import mmseg
repo_dpath = dirname(dirname(mmseg.__file__))
config_dpath = join(repo_dpath, 'configs')
if (not exists(config_dpath)):
raise Exception('Cannot find config path')
return config_dpath
def test_config_build_segmentor():
    """Test that all segmentation models defined in the configs can be
    initialized."""
config_dpath = _get_config_directory()
print('Found config_dpath = {!r}'.format(config_dpath))
config_fpaths = []
for sub_folder in os.listdir(config_dpath):
        if isdir(join(config_dpath, sub_folder)):
config_fpaths.append(list(glob.glob(join(config_dpath, sub_folder, '*.py')))[0])
config_fpaths = [p for p in config_fpaths if (p.find('_base_') == (- 1))]
config_names = [relpath(p, config_dpath) for p in config_fpaths]
print('Using {} config files'.format(len(config_names)))
for config_fname in config_names:
config_fpath = join(config_dpath, config_fname)
config_mod = Config.fromfile(config_fpath)
        # bare attribute accesses verify that the required fields exist
        config_mod.model
        config_mod.train_cfg
        config_mod.test_cfg
print('Building segmentor, config_fpath = {!r}'.format(config_fpath))
if ('pretrained' in config_mod.model):
config_mod.model['pretrained'] = None
print('building {}'.format(config_fname))
segmentor = build_segmentor(config_mod.model, train_cfg=config_mod.train_cfg, test_cfg=config_mod.test_cfg)
assert (segmentor is not None)
head_config = config_mod.model['decode_head']
_check_decode_head(head_config, segmentor.decode_head)
def test_config_data_pipeline():
    """Test whether the data pipeline is valid and can process corner cases.

    CommandLine:
        xdoctest -m tests/test_config.py test_config_data_pipeline
    """
from mmcv import Config
from mmseg.datasets.pipelines import Compose
import numpy as np
config_dpath = _get_config_directory()
print('Found config_dpath = {!r}'.format(config_dpath))
import glob
config_fpaths = list(glob.glob(join(config_dpath, '**', '*.py')))
config_fpaths = [p for p in config_fpaths if (p.find('_base_') == (- 1))]
config_names = [relpath(p, config_dpath) for p in config_fpaths]
print('Using {} config files'.format(len(config_names)))
for config_fname in config_names:
config_fpath = join(config_dpath, config_fname)
print('Building data pipeline, config_fpath = {!r}'.format(config_fpath))
config_mod = Config.fromfile(config_fpath)
        # strip the image/annotation loading steps; the pipelines below are
        # fed in-memory arrays instead of files
        load_img_pipeline = config_mod.train_pipeline.pop(0)
        to_float32 = load_img_pipeline.get('to_float32', False)
        config_mod.train_pipeline.pop(0)
        config_mod.test_pipeline.pop(0)
train_pipeline = Compose(config_mod.train_pipeline)
test_pipeline = Compose(config_mod.test_pipeline)
img = np.random.randint(0, 255, size=(1024, 2048, 3), dtype=np.uint8)
if to_float32:
img = img.astype(np.float32)
seg = np.random.randint(0, 255, size=(1024, 2048, 1), dtype=np.uint8)
results = dict(filename='test_img.png', ori_filename='test_img.png', img=img, img_shape=img.shape, ori_shape=img.shape, gt_semantic_seg=seg)
results['seg_fields'] = ['gt_semantic_seg']
print('Test training data pipeline: \n{!r}'.format(train_pipeline))
output_results = train_pipeline(results)
assert (output_results is not None)
results = dict(filename='test_img.png', ori_filename='test_img.png', img=img, img_shape=img.shape, ori_shape=img.shape)
print('Test testing data pipeline: \n{!r}'.format(test_pipeline))
output_results = test_pipeline(results)
assert (output_results is not None)
def _check_decode_head(decode_head_cfg, decode_head):
if isinstance(decode_head_cfg, list):
assert isinstance(decode_head, nn.ModuleList)
assert (len(decode_head_cfg) == len(decode_head))
num_heads = len(decode_head)
for i in range(num_heads):
_check_decode_head(decode_head_cfg[i], decode_head[i])
return
    assert (decode_head_cfg['type'] == decode_head.__class__.__name__)
in_channels = decode_head_cfg.in_channels
input_transform = decode_head.input_transform
assert (input_transform in ['resize_concat', 'multiple_select', None])
    if (input_transform is not None):
        assert isinstance(in_channels, (list, tuple))
        assert isinstance(decode_head.in_index, (list, tuple))
        assert (len(in_channels) == len(decode_head.in_index))
        if (input_transform == 'resize_concat'):
            assert (sum(in_channels) == decode_head.in_channels)
    else:
        assert isinstance(in_channels, int)
        assert (in_channels == decode_head.in_channels)
        assert isinstance(decode_head.in_index, int)
if (decode_head_cfg['type'] == 'PointHead'):
assert ((decode_head_cfg.channels + decode_head_cfg.num_classes) == decode_head.fc_seg.in_channels)
assert (decode_head.fc_seg.out_channels == decode_head_cfg.num_classes)
else:
assert (decode_head_cfg.channels == decode_head.conv_seg.in_channels)
assert (decode_head.conv_seg.out_channels == decode_head_cfg.num_classes)
def test_classes():
assert (list(CityscapesDataset.CLASSES) == get_classes('cityscapes'))
assert (list(PascalVOCDataset.CLASSES) == get_classes('voc') == get_classes('pascal_voc'))
assert (list(ADE20KDataset.CLASSES) == get_classes('ade') == get_classes('ade20k'))
with pytest.raises(ValueError):
get_classes('unsupported')
def test_palette():
assert (CityscapesDataset.PALETTE == get_palette('cityscapes'))
assert (PascalVOCDataset.PALETTE == get_palette('voc') == get_palette('pascal_voc'))
assert (ADE20KDataset.PALETTE == get_palette('ade') == get_palette('ade20k'))
with pytest.raises(ValueError):
get_palette('unsupported')
@patch('mmseg.datasets.CustomDataset.load_annotations', MagicMock)
@patch('mmseg.datasets.CustomDataset.__getitem__', MagicMock(side_effect=(lambda idx: idx)))
def test_dataset_wrapper():
dataset_a = CustomDataset(img_dir=MagicMock(), pipeline=[])
len_a = 10
dataset_a.img_infos = MagicMock()
dataset_a.img_infos.__len__.return_value = len_a
dataset_b = CustomDataset(img_dir=MagicMock(), pipeline=[])
len_b = 20
dataset_b.img_infos = MagicMock()
dataset_b.img_infos.__len__.return_value = len_b
concat_dataset = ConcatDataset([dataset_a, dataset_b])
assert (concat_dataset[5] == 5)
assert (concat_dataset[25] == 15)
assert (len(concat_dataset) == (len(dataset_a) + len(dataset_b)))
repeat_dataset = RepeatDataset(dataset_a, 10)
assert (repeat_dataset[5] == 5)
assert (repeat_dataset[15] == 5)
assert (repeat_dataset[27] == 7)
assert (len(repeat_dataset) == (10 * len(dataset_a)))
def test_custom_dataset():
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 1024)
train_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations'), dict(type='Resize', img_scale=(128, 256), ratio_range=(0.5, 2.0)), dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_semantic_seg'])]
test_pipeline = [dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(128, 256), flip=False, transforms=[dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img'])])]
train_dataset = CustomDataset(train_pipeline, data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'), img_dir='imgs/', ann_dir='gts/', img_suffix='img.jpg', seg_map_suffix='gt.png')
assert (len(train_dataset) == 5)
train_dataset = CustomDataset(train_pipeline, data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'), img_dir='imgs/', ann_dir='gts/', img_suffix='img.jpg', seg_map_suffix='gt.png', split='splits/train.txt')
assert (len(train_dataset) == 4)
train_dataset = CustomDataset(train_pipeline, img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'), ann_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/gts'), img_suffix='img.jpg', seg_map_suffix='gt.png')
assert (len(train_dataset) == 5)
train_dataset = CustomDataset(train_pipeline, data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'), img_dir=osp.abspath(osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs')), ann_dir=osp.abspath(osp.join(osp.dirname(__file__), '../data/pseudo_dataset/gts')), img_suffix='img.jpg', seg_map_suffix='gt.png')
assert (len(train_dataset) == 5)
test_dataset = CustomDataset(test_pipeline, img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'), img_suffix='img.jpg', test_mode=True)
assert (len(test_dataset) == 5)
train_data = train_dataset[0]
assert isinstance(train_data, dict)
test_data = test_dataset[0]
assert isinstance(test_data, dict)
gt_seg_maps = train_dataset.get_gt_seg_maps()
assert (len(gt_seg_maps) == 5)
pseudo_results = []
for gt_seg_map in gt_seg_maps:
(h, w) = gt_seg_map.shape
pseudo_results.append(np.random.randint(low=0, high=7, size=(h, w)))
eval_results = train_dataset.evaluate(pseudo_results)
assert isinstance(eval_results, dict)
assert ('mIoU' in eval_results)
assert ('mAcc' in eval_results)
assert ('aAcc' in eval_results)
train_dataset.CLASSES = tuple((['a'] * 7))
eval_results = train_dataset.evaluate(pseudo_results)
assert isinstance(eval_results, dict)
assert ('mIoU' in eval_results)
assert ('mAcc' in eval_results)
assert ('aAcc' in eval_results)
@DATASETS.register_module()
class ToyDataset(object):
def __init__(self, cnt=0):
self.cnt = cnt
    def __getitem__(self, idx):
return idx
def __len__(self):
return 100
def test_build_dataset():
cfg = dict(type='ToyDataset')
dataset = build_dataset(cfg)
assert isinstance(dataset, ToyDataset)
assert (dataset.cnt == 0)
dataset = build_dataset(cfg, default_args=dict(cnt=1))
assert isinstance(dataset, ToyDataset)
assert (dataset.cnt == 1)
data_root = osp.join(osp.dirname(__file__), '../data/pseudo_dataset')
img_dir = 'imgs/'
ann_dir = 'gts/'
cfg = dict(type='CustomDataset', pipeline=[], data_root=data_root, img_dir=[img_dir, img_dir], ann_dir=[ann_dir, ann_dir])
dataset = build_dataset(cfg)
assert isinstance(dataset, ConcatDataset)
assert (len(dataset) == 10)
cfg = dict(type='CustomDataset', pipeline=[], data_root=data_root, img_dir=img_dir, ann_dir=ann_dir, split=['splits/train.txt', 'splits/val.txt'])
dataset = build_dataset(cfg)
assert isinstance(dataset, ConcatDataset)
assert (len(dataset) == 5)
cfg = dict(type='CustomDataset', pipeline=[], data_root=data_root, img_dir=img_dir, ann_dir=[ann_dir, ann_dir], split=['splits/train.txt', 'splits/val.txt'])
dataset = build_dataset(cfg)
assert isinstance(dataset, ConcatDataset)
assert (len(dataset) == 5)
cfg = dict(type='CustomDataset', pipeline=[], data_root=data_root, img_dir=[img_dir, img_dir], test_mode=True)
dataset = build_dataset(cfg)
assert isinstance(dataset, ConcatDataset)
assert (len(dataset) == 10)
cfg = dict(type='CustomDataset', pipeline=[], data_root=data_root, img_dir=[img_dir, img_dir], split=['splits/val.txt', 'splits/val.txt'], test_mode=True)
dataset = build_dataset(cfg)
assert isinstance(dataset, ConcatDataset)
assert (len(dataset) == 2)
with pytest.raises(AssertionError):
cfg = dict(type='CustomDataset', pipeline=[], data_root=data_root, img_dir=[img_dir, img_dir], ann_dir=[ann_dir, ann_dir, ann_dir])
build_dataset(cfg)
with pytest.raises(AssertionError):
cfg = dict(type='CustomDataset', pipeline=[], data_root=data_root, img_dir=[img_dir, img_dir], split=['splits/val.txt', 'splits/val.txt', 'splits/val.txt'])
build_dataset(cfg)
with pytest.raises(AssertionError):
cfg = dict(type='CustomDataset', pipeline=[], data_root=data_root, img_dir=img_dir, ann_dir=[ann_dir, ann_dir], split=['splits/val.txt', 'splits/val.txt', 'splits/val.txt'])
build_dataset(cfg)