class White(nn.Module):
def __init__(self, dim, variance=1.0):
super(White, self).__init__()
self.dim = torch.tensor([dim], requires_grad=False)
self.variance = torch.nn.Parameter(transform_backward(torch.tensor([variance])))
def forward(self, X, X2=None):
if (X2 is None):
return (torch.eye(X.size()[0]) * transform_forward(self.variance))
else:
return 0.0
|
class Add(nn.Module):
def __init__(self, k1, k2):
super(Add, self).__init__()
self.k1 = k1
self.k2 = k2
@property
def variance(self):
return transform_backward((transform_forward(self.k1.variance) + transform_forward(self.k2.variance)))
def forward(self, X, X2=None):
return (self.k1(X, X2) + self.k2(X, X2))
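# --- Usage sketch (not part of the original code): combines the White noise kernel
# with Add. It assumes transform_forward/transform_backward form an invertible
# positivity transform; softplus and its inverse are used as stand-ins here, since
# neither helper is defined in this snippet.
import torch
import torch.nn.functional as F
transform_forward = F.softplus
def transform_backward(y): return y + torch.log(-torch.expm1(-y))  # numerically stable inverse softplus
k = Add(White(dim=1, variance=0.5), White(dim=1, variance=1.5))
X = torch.randn(4, 1)
print(k(X))                            # 4x4 diagonal matrix with the summed noise variances
print(transform_forward(k.variance))   # total variance of the summed kernel (~2.0)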
|
class Sparse1DTensor():
def __init__(self, y, ix):
self.v = torch.tensor(y, dtype=dtype, requires_grad=False)
ix_tensor = torch.tensor(ix)
assert (self.v.numel() == ix_tensor.numel()), 'inputs must be same size'
self.ix = {ix_tensor[i].item(): i for i in torch.arange(self.v.numel())}
def __getitem__(self, k):
if (not len(k.size())):
return self.v[self.ix[k.item()]]
else:
return torch.tensor([self.v[self.ix[kk]] for kk in k.tolist()])
def __setitem__(self, k, v):
if (not len(k.size())):
self.v[self.ix[k.item()]] = v
else:
for (kk, vv) in zip(k.tolist(), v.tolist()):
self.v[self.ix[kk]] = vv
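# --- Usage sketch (not part of the original code): Sparse1DTensor maps arbitrary
# integer indices to a dense value vector. The module-level `dtype` used in
# __init__ is assumed to be a torch floating dtype; torch.float32 is a stand-in.
import torch
dtype = torch.float32
sy = Sparse1DTensor([0.3, -1.2, 2.5], [10, 42, 7])   # values observed at global indices 10, 42, 7
print(sy[torch.tensor(42)])                          # scalar lookup -> -1.2
print(sy[torch.tensor([7, 10])])                     # vector lookup -> tensor([2.5, 0.3])
sy[torch.tensor(42)] = 0.0                           # scalar assignment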
|
class BatchIndices():
def __init__(self, N=None, ix=None, B=None):
assert ((N is not None) or (ix is not None)), 'either N or ix should be provided'
if ((N is not None) and (ix is not None)):
assert (N == ix.numel()), 'N must = size of ix'
self.N = N
self.ix = ix
elif (N is not None):
self.N = N
self.ix = torch.arange(N)
else:
self.ix = ix
self.N = ix.numel()
if (B is None):
self.B = self.N
else:
if (B > self.N):
B = self.N
self.B = B
self.perm = torch.randperm(self.N, requires_grad=False)
def __call__(self, B=None):
if (B is None):
B = self.B
else:
assert (B <= self.N), 'Batch size must be <= data size'
m = torch.min(torch.tensor([B, self.perm.numel()]))
d = (self.perm.numel() - B)
ix_batch = self.perm[:m]
if (d <= 0):
self.perm = torch.randperm(self.N)
if (d < 0):
ix_batch = torch.cat((ix_batch, self.perm[:(- d)]))
self.perm = self.perm[(- d):]
else:
self.perm = self.perm[m:]
return self.ix[ix_batch]
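# --- Usage sketch (not part of the original code): BatchIndices walks through a
# random permutation of the data indices and reshuffles once an epoch is used up,
# so each call yields a batch of (at most) B indices without replacement within an epoch.
import torch
batcher = BatchIndices(N=10, B=4)
for step in range(5):
    ix = batcher()              # tensor of 4 indices into the data
    print(step, ix.tolist())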
|
def main():
parser = ArgumentParser()
parser.add_argument('--mode', default='train')
parser.add_argument('--save_dir', default='logire-save')
parser.add_argument('--train_batch_size', type=int, default=4)
parser.add_argument('--test_batch_size', type=int, default=4)
parser.add_argument('--Ns', type=int, default=50, help='size of the latent rule set')
parser.add_argument('--num_epochs', type=int, default=50, help='number of training epochs for the relation extractor')
parser.add_argument('--warmup_ratio', type=float, default=0.06)
parser.add_argument('--rel_num', type=int, default=65, help='number of relation types')
parser.add_argument('--ent_num', type=int, default=10, help='number of entity types')
parser.add_argument('--n_iters', type=int, default=10, help='number of iterations')
parser.add_argument('--max_depth', type=int, default=3, help='max depth of the rules')
parser.add_argument('--data_dir', default='../kbp-benchmarks/DWIE/data/docred-style')
parser.add_argument('--backbone_path', default='data/dwie-atlop.dump')
parser.add_argument('--rule_path', default='data/dwie.grules.json')
args = parser.parse_args()
if (args.mode == 'train'):
logire = LogiRE(args)
logire.EM_optimization()
elif (args.mode == 'test'):
logire = LogiRE(args)
(dev_ret, test_ret) = logire.evaluate_base()
print(((('#' * 100) + '\n# Evaluating Backbone\n') + ('#' * 100)))
print('dev ', dev_ret)
print('test', test_ret)
collate_fn = get_backbone_collate_fn(0)
dev_data = BackboneDataset(logire.re_reader.read('dev'), logire.type_masks['dev'], logire.dists['dev'])
dev_loader = DataLoader(dev_data, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn)
test_data = BackboneDataset(logire.re_reader.read('test'), logire.type_masks['test'], logire.dists['test'])
test_loader = DataLoader(test_data, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn)
print(((('#' * 100) + '\n# Evaluating LogiRE\n') + ('#' * 100)))
for iter_i in range((args.n_iters + 1)):
print(((('-' * 45) + f'Iter {iter_i}') + ('-' * 50)))
save_path = os.path.join(args.save_dir, f'scorer-{iter_i}.pt')
model = RelationExtractor(torch.load(save_path))
dev_ret = logire.evaluate_relation_extractor(model, dev_loader)
print('dev ', dev_ret)
test_ret = logire.evaluate_relation_extractor(model, test_loader, dev_ret['theta'])
print('test', test_ret)
else:
raise ValueError(f'Unknown mode {args.mode}')
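# Example invocations of the CLI above (the entry-point module name `main.py` is an
# assumption; flags and defaults follow the argparse setup):
#   python main.py --mode train --save_dir logire-save --data_dir ../kbp-benchmarks/DWIE/data/docred-style
#   python main.py --mode test --save_dir logire-save --n_iters 10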
|
class FeatureReader(object):
def __init__(self, data_path) -> None:
self.data = torch.load(data_path)
def read(self, split='train'):
return self.data[split]
|
class TextReader(object):
'Read text features: read and store DocRED data.'
def __init__(self, data_dir, save_dir, tokenizer) -> None:
self.data_dir = data_dir
self.save_dir = save_dir
if (not os.path.exists(self.save_dir)):
os.makedirs(self.save_dir)
with open(os.path.join(data_dir, 'rel_info.json')) as fp:
self.rel2info = json.load(fp)
self.id2rel = sorted(list(self.rel2info.keys()))
self.rel2id = {r: i for (i, r) in enumerate(self.id2rel)}
self.data_paths = {'train': os.path.join(data_dir, 'train_annotated.json'), 'dist': os.path.join(data_dir, 'train_distant.json'), 'dev': os.path.join(data_dir, 'dev.json'), 'test': os.path.join(data_dir, 'test.json')}
self.bin_paths = {'train': os.path.join(save_dir, 'train.pth'), 'dist': os.path.join(save_dir, 'dist.pth'), 'dev': os.path.join(save_dir, 'dev.pth'), 'test': os.path.join(save_dir, 'test.pth')}
self.tokenizer = tokenizer
def read(self, split='train'):
bin_path = self.bin_paths[split]
if os.path.exists(bin_path):
return torch.load(bin_path)
else:
features = self.read_raw(split)
torch.save(features, bin_path)
return features
def read_raw(self, split='train', max_seq_length=1024):
with open(self.data_paths[split]) as fp:
data = json.load(fp)
features = []
for item in tqdm(data, desc='reading raw data'):
sents = []
sent_map = []
entities = item['vertexSet']
(entity_start, entity_end) = ([], [])
for entity in entities:
types = []
for mention in entity:
sent_id = mention['sent_id']
pos = mention['pos']
entity_start.append((sent_id, pos[0]))
entity_end.append((sent_id, (pos[1] - 1)))
for (i_s, sent) in enumerate(item['sents']):
new_map = {}
for (i_t, token) in enumerate(sent):
tokens_wordpiece = self.tokenizer.tokenize(token)
if ((i_s, i_t) in entity_start):
tokens_wordpiece = (['*'] + tokens_wordpiece)
if ((i_s, i_t) in entity_end):
tokens_wordpiece = (tokens_wordpiece + ['*'])
new_map[i_t] = len(sents)
sents.extend(tokens_wordpiece)
new_map[(i_t + 1)] = len(sents)
sent_map.append(new_map)
entity_pos = []
for e in entities:
entity_pos.append([])
for m in e:
start = sent_map[m['sent_id']][m['pos'][0]]
end = sent_map[m['sent_id']][m['pos'][1]]
entity_pos[(- 1)].append((start, end))
labels = torch.zeros(len(entities), len(entities), len(self.rel2id), dtype=torch.bool)
if ('labels' in item):
for fact in item['labels']:
labels[(fact['h'], fact['t'], self.rel2id[fact['r']])] = 1
sents = sents[:(max_seq_length - 2)]
input_ids = self.tokenizer.convert_tokens_to_ids(sents)
input_ids = self.tokenizer.build_inputs_with_special_tokens(input_ids)
features.append({'input_ids': input_ids, 'entity_pos': entity_pos, 'title': item['title'], 'N': len(entities), 'labels': labels.to_sparse()})
return features
def get_prior(self, split='train'):
train_data = self.read(split)
total = 0.0
pos = torch.zeros([len(self.rel2id)])
for f in tqdm(train_data):
labels = f['labels'].float().to_dense()
pos += labels.sum(dim=(0, 1))
total += (labels.size(0) ** 2)
return (pos / total)
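# --- Usage sketch (not part of the original code): assumes a DocRED-style data
# directory and a HuggingFace tokenizer; the paths and model name below are
# placeholders, not taken from this repository.
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
reader = TextReader(data_dir='path/to/docred', save_dir='cache/', tokenizer=tokenizer)
train_features = reader.read('train')   # tokenised documents, cached to cache/train.pth
prior = reader.get_prior('train')       # positive rate per relation, shape [len(rel2id)]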
|
class ERuleReader(object):
'Read and store rule co-occurrence statistics from DocRED-style data.'
def __init__(self, data_dir, save_dir, max_step=3) -> None:
self.data_dir = data_dir
self.save_dir = save_dir
if (not os.path.exists(self.save_dir)):
os.makedirs(self.save_dir)
self.rel2id = {k: (v - 1) for (k, v) in json.load(open(os.path.join(data_dir, 'meta/rel2id.json'))).items()}
self.id2rel = {k: v for (v, k) in self.rel2id.items()}
self.R = (len(self.rel2id) - 1)
self.type2id = json.load(open(os.path.join(data_dir, 'meta/ner2id.json')))
self.id2type = {k: v for (v, k) in self.type2id.items()}
self.data_paths = {'rtrain': os.path.join(data_dir, 'rtrain.json'), 'train': os.path.join(data_dir, 'train_annotated.json'), 'dist': os.path.join(data_dir, 'train_distant.json'), 'dev': os.path.join(data_dir, 'dev.json'), 'test': os.path.join(data_dir, 'test.json')}
self.bin_paths = {'rtrain': os.path.join(save_dir, 'cooccur-rtrain.pth'), 'train': os.path.join(save_dir, 'cooccur-train.pth'), 'dist': os.path.join(save_dir, 'cooccur-dist.pth'), 'dev': os.path.join(save_dir, 'cooccur-dev.pth'), 'test': os.path.join(save_dir, 'cooccur-test.pth')}
self.max_step = max_step
def read(self, split='train'):
bin_path = self.bin_paths[split]
if os.path.exists(bin_path):
return torch.load(bin_path)
else:
features = self.read_raw(split)
torch.save(features, bin_path)
return features
def read_raw(self, split='train'):
'Count co-occurrence info.'
max_step = self.max_step
r2epair = self.get_r2epair()
rule_counter = {(i, h, t): Counter() for i in range(self.R) for (h, t) in r2epair[i]}
with open(self.data_paths[split]) as fp:
data = json.load(fp)
for item in tqdm(data, desc='reading raw data'):
entities = item['vertexSet']
entity_types = [self.type2id[e[0]['type']] for e in entities]
paths = {}
meta_paths = {1: paths}
for fact in item['labels']:
(h, t, r) = (fact['h'], fact['t'], self.rel2id[fact['r']])
if (h not in paths):
paths[h] = {t: [([r], [t])]}
elif (t not in paths[h]):
paths[h][t] = [([r], [t])]
else:
paths[h][t].append(([r], [t]))
if (t not in paths):
paths[t] = {h: [([(r + self.R)], [h])]}
elif (h not in paths[t]):
paths[t][h] = [([(r + self.R)], [h])]
else:
paths[t][h].append(([(r + self.R)], [h]))
for step in range(2, (max_step + 1)):
prev_paths = meta_paths[(step - 1)]
paths = {}
for h in prev_paths:
for (inode, prev_chain) in prev_paths[h].items():
if (inode in meta_paths[1]):
for (t, rs) in meta_paths[1][inode].items():
if (h == t):
continue
new_chain = append_chain(prev_chain, rs)
if (not new_chain):
continue
if (h not in paths):
paths[h] = {t: new_chain}
elif (t not in paths[h]):
paths[h][t] = new_chain
else:
paths[h][t].extend(new_chain)
meta_paths[step] = paths
for h in meta_paths[1]:
for (t, rs) in meta_paths[1][h].items():
c_meta_paths = set()
for step in range(1, (max_step + 1)):
if ((h in meta_paths[step]) and (t in meta_paths[step][h])):
for path in meta_paths[step][h][t]:
c_meta_paths.add(tuple(path[0]))
for r in rs:
if (r[0][0] >= self.R):
continue
triple = (r[0][0], entity_types[h], entity_types[t])
rule_counter[triple].update(c_meta_paths)
triples = []
triple2rules = {}
triple2probs = {}
lens = [len(epair) for epair in r2epair]
for (ri, epairs) in enumerate(r2epair):
for epair in epairs:
triple = (ri, epair[0], epair[1])
total = sum(rule_counter[triple].values())
(rules, probs) = ([], [])
for rule in rule_counter[triple]:
rules.append(rule)
probs.append((rule_counter[triple][rule] / total))
triples.append(triple)
triple2rules[triple] = rules
triple2probs[triple] = probs
features = {'triples': triples, 'sections': lens, 'triple2rules': triple2rules, 'triple2probs': triple2probs}
return features
def get_r2epair(self):
r2epair = [[] for _ in range((len(self.rel2id) - 1))]
with open(self.data_paths['train']) as fp:
data = json.load(fp)
for item in data:
entities = item['vertexSet']
entity_types = [self.type2id[e[0]['type']] for e in entities]
for fact in item['labels']:
(h, t, r) = (entity_types[fact['h']], entity_types[fact['t']], self.rel2id[fact['r']])
if ((h, t) not in r2epair[r]):
r2epair[r].append((h, t))
return r2epair
def get_epair2r(self):
e_pair2r = torch.zeros(len(self.type2id), len(self.type2id), (len(self.rel2id) - 1)).bool()
with open(self.data_paths['train']) as fp:
data = json.load(fp)
for item in data:
entities = item['vertexSet']
entity_types = [self.type2id[e[0]['type']] for e in entities]
for fact in item['labels']:
(h, t, r) = (fact['h'], fact['t'], self.rel2id[fact['r']])
e_pair2r[(entity_types[h], entity_types[t], r)] = 1
print(e_pair2r.size(), e_pair2r.sum())
return e_pair2r
def get_type_mask(self, triples, sections, split='train'):
ntypes = len(self.type2id)
rpair2id = [{} for _ in sections]
tid = 0
for section in sections:
for sid in range(section):
(r, e1, e2) = triples[tid]
rpair2id[r][(e1, e2)] = sid
tid += 1
triple2sid = torch.CharTensor(ntypes, ntypes, self.R).fill_((- 1))
for ei in range(ntypes):
for ej in range(ntypes):
for r in range(self.R):
triple2sid[(ei, ej, r)] = rpair2id[r].get((ei, ej), (- 1))
with open(self.data_paths[split]) as fp:
data = json.load(fp)
type_masks = []
for item in data:
entities = item['vertexSet']
N = len(entities)
entity_types = torch.tensor([self.type2id[e[0]['type']] for e in entities])
type_indices = (entity_types.unsqueeze(1).repeat(1, N), entity_types.unsqueeze(0).repeat(N, 1))
type_mask = triple2sid[(type_indices[0], type_indices[1])]
type_masks.append(type_mask)
return type_masks
def get_dist(self, split='train'):
with open(self.data_paths[split]) as fp:
data = json.load(fp)
dists = []
for item in tqdm(data, desc='reading raw data'):
entities = item['vertexSet']
N = len(entities)
entities_pos = []
for entity in entities:
s = entity[0]['pos'][0]
e = entity[0]['pos'][1]
entities_pos.append([s, e])
dist = torch.zeros(N, N)
for h in range(N):
for t in range(N):
(sh, eh) = entities_pos[h]
(st, et) = entities_pos[t]
dist[(h, t)] = min(abs((sh - et)), abs((st - eh)))
dists.append(dist)
return dists
|
def append_chain(chains, rs):
ret = []
for (chain, chain_nodes) in chains:
for (r, rnode) in rs:
if (rnode[0] not in chain_nodes):
ret.append(((chain + r), (chain_nodes + rnode)))
return ret
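# --- Tiny worked example (not part of the original code): append_chain extends each
# (relation-chain, visited-nodes) pair by one hop, skipping hops that revisit a node.
# Relations are integer ids, nodes are entity indices.
chains = [([3], [5])]                 # one 1-hop chain: relation 3 reaching entity 5
hops = [([7], [2]), ([9], [5])]       # candidate next hops starting from entity 5
print(append_chain(chains, hops))     # -> [([3, 7], [5, 2])]; the hop back to entity 5 is dropped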
|
class BratConverter():
'\n Encapsulates the paths to convert SORE filtered files to BRAT annotations (for visualisation of results).\n '
def __init__(self, paths_to_datasets, narrowIE_path, SORE_processed_path, BRAT_output_path):
'\n Initialise BratConverter with relevant paths.\n\n :param paths_to_datasets: List of paths to unprocessed data sets. Can be used to retrieve document meta-data, such as\n the category of a document in the OA-STM corpus. If these categories are found, the stats per category will be printed.\n :param narrowIE_path: Path to CSV file with narrow IE extractions.\n :param SORE_processed_path: Path to directory with the files that were filtered with SORE. Unfiltered json-files\n can be found here as well, so that the entire document can be converted to BRAT (not just filtered sentences).\n :param BRAT_output_path: Path to write the output .ann and .txt files for visualisation with BRAT\n '
self.paths_to_datasets = paths_to_datasets
self.narrowIE_csv = narrowIE_path
self.SORE_path = SORE_processed_path
self.BRAT_output_path = BRAT_output_path
def get_complete_span(self, w_list, sentence):
'\n Determine the string in the original sentence spanned by the first and last word of an argument-list.\n\n :param w_list: List of words for an argument.\n :param sentence: Sentence from which the argument was extracted.\n :return: The string in the sentence that corresponds to the argument-list.\n '
str_to_search = None
w_list = [w for w in w_list if (w not in [')', '('])]
if (len(w_list) == 1):
str_to_search = w_list[0]
str_to_search = str_to_search.replace('(', '\\(').replace(')', '\\)')
elif (len(w_list) > 1):
try:
str_to_search = ((w_list[0] + re.search(((w_list[0] + '(.*?)') + w_list[(- 1)]), sentence).group(1)) + w_list[(- 1)])
except:
print('Regex issue ignored for now: {}'.format(w_list))
return str_to_search
def prep_ann_line(self, span_type, text, sentence, sent_start_idx, span_counter):
'\n Prepare a line that should be written to the .ann file, for a given span.\n\n :param span_type: Annotation type label for the span (e.g. NarrowIE_span, OpenIE_rel_span, OpenIE_span or Context_span).\n :param text: A span of text (argument represented as string) found by SORE (or OpenIE if relevant lines are uncommented).\n :param sentence: String with the sentence from the original input, in which the span was found.\n :param sent_start_idx: Index of the sentence start in the whole document\n :param span_counter: Index of the previous span annotation in the whole document\n :return: The line to write to an .ann file, and the updated span_counter\n '
rel_match = None
if (text and (text in sentence)):
sc = (span_counter + 1)
s = sentence.find(text)
e = ((s + len(text)) + sent_start_idx)
return ('T{}\t{} {} {}\t{}'.format(sc, span_type, str((s + sent_start_idx)), str(e), text), sc)
elif text:
str_to_search = text.replace('(', '\\(').replace(')', '\\)').replace('[', ' ').replace(']', ' ')
try:
rel_match = re.search(str_to_search, sentence)
except:
print('Regex issue ignored for now: {}'.format(str_to_search))
if rel_match:
sc = (span_counter + 1)
s = (rel_match.start() + sent_start_idx)
e = (rel_match.end() + sent_start_idx)
return ('T{}\t{} {} {}\t{}'.format(sc, span_type, str(s), str(e), text), sc)
else:
return ('', span_counter)
def convert_NIE_annotations(self, sentence, narrowIE_args, sent_start_idx, span_counter, rel_counter):
'\n Converts the narrow IE annotations for a sentence to a list of annotations to write to the .ann file.\n\n :param sentence: String with input sentence.\n :param narrowIE_args: Argument phrases found through narrow IE in that sentence.\n :param sent_start_idx: Index of the sentence start in the whole document\n :param span_counter: Index of the previous span annotation in the whole document\n :param rel_counter: Index of the previous relation annotation in the whole document\n :return: Lines to write for the narrow IE extractions in this sentence, and updated counters.\n '
lines_to_write = []
narrowIE_unique_args = []
for arg in narrowIE_args[0]:
if (arg not in narrowIE_unique_args):
narrowIE_unique_args.append(arg)
NIE_span_ids = []
for str_to_search in narrowIE_unique_args:
(NIE_arg_span, span_counter) = self.prep_ann_line('NarrowIE_span', str_to_search, sentence, sent_start_idx, span_counter)
if (NIE_arg_span != ''):
NIE_span_ids.append(span_counter)
lines_to_write.append(NIE_arg_span)
while (len(NIE_span_ids) > 1):
(a1, a2) = NIE_span_ids[:2]
NIE_span_ids.pop(0)
relation = 'R{}\tNIE_extraction Arg0:T{} Arg1:T{}\t'.format(rel_counter, a1, a2)
rel_counter += 1
lines_to_write.append(relation)
return (lines_to_write, span_counter, rel_counter)
def convert_OIE_annotations(self, sentence, annotations, sent_start_idx, span_counter, event_counter, attribute_counter):
'\n Converts an Open IE extraction (after/before filtering) for a sentence to a list of annotations to write.\n\n :param sentence: String with input sentence.\n :param annotations: One of possibly multiple Open IE extractions for this sentence\n :param sent_start_idx: Index of the sentence start in the whole document\n :param span_counter: Index of the previous span annotation in the whole document\n :param event_counter: Index of the previous Open IE event annotation in the whole document\n :param attribute_counter: Index of the previous event attribute in the whole document\n :return: Lines to write for one Open IE extraction in this sentence, and updated counters.\n '
lines_to_write = []
attributes = []
event_span_ids = []
OIE_rel = annotations['rel']
(rel_span, span_counter) = self.prep_ann_line('OpenIE_rel_span', OIE_rel, sentence, sent_start_idx, span_counter)
if (rel_span == ''):
parts = OIE_rel.split(' ')
(rel_span, span_counter) = self.prep_ann_line('OpenIE_rel_span', max(parts, key=len), sentence, sent_start_idx, span_counter)
if (rel_span != ''):
event_span_ids.append(span_counter)
lines_to_write.append(rel_span)
OIE_arg_list = []
for arg in annotations['args']:
(w_list, _) = arg
if (w_list != []):
OIE_arg_list.append(w_list)
for w_list in OIE_arg_list:
str_to_search = self.get_complete_span(w_list, sentence)
(arg_span, span_counter) = self.prep_ann_line('OpenIE_span', str_to_search, sentence, sent_start_idx, span_counter)
if (arg_span != ''):
lines_to_write.append(arg_span)
if (event_span_ids != []):
event_span_ids.append(span_counter)
if (event_span_ids != []):
context_span = None
if (annotations['context'] != 'context()'):
str_to_search = annotations['context'][8:(- 1)]
(context_span, span_counter) = self.prep_ann_line('Context_span', str_to_search, sentence, sent_start_idx, span_counter)
context_id = span_counter
if (context_span != ''):
lines_to_write.append(context_span)
if (float(annotations['conf']) > 0.8):
confidence = 'High'
else:
confidence = 'Low'
attributes.append('A{}\tConfidence E{} {}'.format(attribute_counter, event_counter, confidence))
attribute_counter += 1
if annotations['negation']:
attributes.append('A{}\tNegation E{}'.format(attribute_counter, event_counter))
attribute_counter += 1
event = 'E{}\tOpenIE_rel_span:T{} '.format(event_counter, str(event_span_ids.pop(0)))
event_counter += 1
for (idx, arg) in enumerate(event_span_ids):
event += 'Arg{}:T{} '.format(idx, arg)
if context_span:
event += 'Context:T{}'.format(context_id)
lines_to_write.append(event)
lines_to_write += attributes
return (lines_to_write, span_counter, event_counter, attribute_counter)
def parse_argument_list(self, string_list):
'\n Parses a narrow IE argument from the narrow IE CSV file (duplicate of narrowIE_parser).\n\n :param string_list: String that holds the list of arguments in the CSV file.\n :return: List of argument-phrases\n '
return [x[1:(- 1)] for x in string_list[1:(- 1)].split(', ')]
def convert_to_BRAT(self, prefix):
'\n Convert the SORE extractions to BRAT annotations.\n\n :param prefix: Experiment name prefix for SORE files to convert.\n '
dataset = {}
for dataset_path in self.paths_to_datasets:
with open(dataset_path) as d:
dataset.update(json.load(d))
OIE_dict = {}
SORE_dict = {}
unfiltered_files = glob.glob(((self.SORE_path + prefix) + '*_unfiltered.json'))
filtered_files = glob.glob(((self.SORE_path + prefix) + '*[0-9].json'))
for annotation_file in unfiltered_files:
with open(annotation_file) as f:
OIE_dict.update(json.load(f))
for annotation_file in filtered_files:
with open(annotation_file) as f:
SORE_dict.update(json.load(f))
narrowIE_dict = {}
with open(self.narrowIE_csv, 'r') as csv_f:
reader = csv.DictReader(csv_f)
for row in reader:
doc_id = row['doc_id']
sent_id = row['sentence_nr']
relation_types = row['relation_types']
argument_list = list(self.parse_argument_list(row['arguments']))
if (doc_id in narrowIE_dict):
narrowIE_dict[doc_id].update({sent_id: [argument_list], 'rel_types': relation_types})
else:
narrowIE_dict[doc_id] = {sent_id: [argument_list], 'rel_types': relation_types}
OIE_dicts_per_category = {}
SORE_dicts_per_category = {}
for doc_id in SORE_dict.keys():
try:
if (dataset[doc_id]['metadata'] != None):
category = dataset[doc_id]['metadata']['category']
else:
category = 'No_categories'
except KeyError:
category = 'No_categories'
if (category in OIE_dicts_per_category):
OIE_dicts_per_category[category].update({doc_id: OIE_dict[doc_id]})
SORE_dicts_per_category[category].update({doc_id: SORE_dict[doc_id]})
else:
OIE_dicts_per_category[category] = {doc_id: OIE_dict[doc_id]}
SORE_dicts_per_category[category] = {doc_id: SORE_dict[doc_id]}
ann_file = ((((self.BRAT_output_path + prefix) + '[') + doc_id) + '].ann')
txt_file = ((((self.BRAT_output_path + prefix) + '[') + doc_id) + '].txt')
all_sentences = ''
lines_for_document = []
sent_start_idx = 0
doc_span_cnt = 0
doc_attr_cnt = 1
doc_event_cnt = 1
doc_rel_cnt = 1
for (sent_id, sent_) in OIE_dict[doc_id].items():
sentence = sent_[0].replace('(', '\\(').replace(')', '\\)')
lines_for_sent = []
if (sent_id in narrowIE_dict[doc_id]):
narrowIE_args = narrowIE_dict[doc_id][sent_id]
(anns, doc_span_cnt, doc_rel_cnt) = self.convert_NIE_annotations(sentence, narrowIE_args, sent_start_idx, doc_span_cnt, doc_rel_cnt)
lines_for_sent += anns
if (sent_id in SORE_dict[doc_id]):
extractions = SORE_dict[doc_id][sent_id][1:]
for extraction in extractions:
(anns, doc_span_cnt, doc_event_cnt, doc_attr_cnt) = self.convert_OIE_annotations(sentence, extraction['extraction'], sent_start_idx, doc_span_cnt, doc_event_cnt, doc_attr_cnt)
lines_for_sent += anns
sent_start_idx += len(sentence)
all_sentences += sentence
lines_for_document += lines_for_sent
with open(ann_file, 'w') as f:
for line in lines_for_document:
if (line.rstrip() != ''):
f.writelines((line + '\n'))
with open(txt_file, 'w') as f:
f.write(all_sentences)
for category in OIE_dicts_per_category.keys():
print('\nCATEGORY: {}'.format(category))
get_stats_unfiltered(OIE_dicts_per_category[category])
get_stats_filtered(SORE_dicts_per_category[category])
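# --- Usage sketch (not part of the original code): all paths are placeholders.
# convert_to_BRAT additionally relies on get_stats_unfiltered/get_stats_filtered
# from the surrounding SORE module to print per-category statistics.
converter = BratConverter(
    paths_to_datasets=['data/datasets/OA-STM.json'],
    narrowIE_path='data/narrowIE/combined_predictions.csv',
    SORE_processed_path='data/SORE_output/',
    BRAT_output_path='data/BRAT/',
)
converter.convert_to_BRAT(prefix='experiment1_')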
|
class NarrowIEOpenIECombiner(object):
'\n Encapsulates paths and settings for SORE filtering.\n '
def __init__(self, oie_data_dir, IDF_path, csv_path, SUBWORDUNIT, sp_size, number_of_clusters=50, stemming=False, stopwords=True, SUBWORD_UNIT_COMBINATION='avg', path_to_embeddings=None):
'\n Initialise the embedding and clustering settings and paths.\n\n :param oie_data_dir: Input directory to all files that contain OIE extractions.\n :param IDF_path: Path to the IDF_weights created during filter preparation.\n :param csv_path: Path to the CSV file with combined extractions from narrow IE.\n :param SUBWORDUNIT: Boolean value that indicates whether subwordunits have been used during IDF weight creation.\n :param sp_size: The size of the SentencePiece vocab used to compute subwordunits.\n :param number_of_clusters: The number of clusters to compute over the narrow IE arguments.\n :param stemming: Boolean that determines whether keyphrases are stemmed before filtering.\n :param stopwords: Boolean that determines whether stopwords are removed from keyphrases before filtering.\n :param SUBWORD_UNIT_COMBINATION: How the weights for subwordunits are combined to a single weight per word.\n :param path_to_embeddings: Path where ELMo PubMed embeddings can be found.\n '
self.oie_data_dir = oie_data_dir
self.csv_path = csv_path
self.number_of_clusters = number_of_clusters
self.SUBWORD_UNIT_COMBINATION = SUBWORD_UNIT_COMBINATION
self.stemming = stemming
self.stopwords = stopwords
self.IDF_path = IDF_path
self.filter_data_path = (IDF_path.rsplit('/', maxsplit=1)[0] + '/')
self.subwordunit = SUBWORDUNIT
if SUBWORDUNIT:
self.sp_size = str(sp_size)
else:
self.sp_size = ''
self.ELMo_options_path = ''
self.ELMo_weights_path = ''
if (not path_to_embeddings):
self.path_to_embeddings = 'SORE/data/filter_data/elmo_pubmed/'
else:
self.path_to_embeddings = path_to_embeddings
self.check_for_embeddings()
def check_for_embeddings(self):
'\n Check if the ELMo (pubmed) embeddings are present. If not found, will download them for reuse.\n '
types = ['*.hdf5', '*.json']
embedding_files = []
for file_type in types:
embedding_files.extend(glob.glob((self.path_to_embeddings + file_type)))
if (embedding_files == []):
print('No embedding files found, beginning download of ELMo PubMed files.')
w = 'https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_weights_PubMed_only.hdf5'
o = 'https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_options.json'
wget.download(w, (self.path_to_embeddings + 'ELmo_PubMed_weights.hdf5'))
wget.download(o, (self.path_to_embeddings + 'ELmo_PubMed_options.json'))
self.ELMo_weights_path = (self.path_to_embeddings + 'ELmo_PubMed_weights.hdf5')
self.ELMo_options_path = (self.path_to_embeddings + 'ELmo_PubMed_options.json')
elif ((self.path_to_embeddings + 'ELmo_PubMed_weights.hdf5') in embedding_files):
self.ELMo_weights_path = (self.path_to_embeddings + 'ELmo_PubMed_weights.hdf5')
self.ELMo_options_path = (self.path_to_embeddings + 'ELmo_PubMed_options.json')
print('Found ELMo PubMed embeddings')
pass
else:
print('Assuming the ELMo PubMed embeddings are correctly set in {}'.format(self.path_to_embeddings))
def prepare_narrowIE_embeddings(self, prefix, sp_model_path):
'\n Prepare all embeddings for the narrow IE arguments, store them as pickle files for reuse (name based on settings).\n If these pickle files already exist, simply load the embeddings.\n\n :param prefix: Experiment name\n :param sp_model_path: Path to the pre-trained SentencePiece model.\n :return: Phrases extracted through narrow IE, corresponding embeddings and the embedder obj for re-use\n '
settings = '{pr}[{num_clusters}]_{sp}{w}_{stem}_{stop}'.format(pr=prefix, num_clusters=self.number_of_clusters, sp=(self.sp_size + '_'), w=str(self.SUBWORD_UNIT_COMBINATION), stem=str(self.stemming), stop=str(self.stopwords))
embedder = fu.PrepareEmbeddings(prefix, sp_model_path, self.sp_size, self.IDF_path, self.csv_path, self.ELMo_options_path, self.ELMo_weights_path, SUBWORD_UNIT_COMBINATION=self.SUBWORD_UNIT_COMBINATION, subwordunits=self.subwordunit, stemming=self.stemming, stopwords=self.stopwords)
if (not os.path.exists((self.filter_data_path + 'vectors/nIE_phrases_{settings}.pkl'.format(settings=settings)))):
try:
narrowIE_data = embedder.load_narrowIE_data()
narrowIE_embeddings = embedder.embed_all_narrowIE_phrases(narrowIE_data)
except TypeError:
print('Narrow IE arguments not properly embedded.')
return
with open((self.filter_data_path + 'vectors/nIE_phrases_{settings}.pkl'.format(settings=settings)), 'wb') as f:
pickle.dump(narrowIE_data, f)
with open((self.filter_data_path + 'vectors/nIE_emb_{settings}.pkl'.format(settings=settings)), 'wb') as f:
pickle.dump(narrowIE_embeddings, f)
else:
with open((self.filter_data_path + 'vectors/nIE_phrases_{settings}.pkl'.format(settings=settings)), 'rb') as f:
narrowIE_data = pickle.load(f)
with open((self.filter_data_path + 'vectors/nIE_emb_{settings}.pkl'.format(settings=settings)), 'rb') as f:
narrowIE_embeddings = pickle.load(f)
return (narrowIE_data, narrowIE_embeddings, embedder)
def get_docid_from_filename(self, filename, output_name=False):
'\n Simply returns the filename (used as document id) from a path.\n '
if output_name:
return (((self.oie_data_dir + 'processed/') + filename.rsplit('/', maxsplit=1)[1][:(- 4)]) + '_processed.txt')
return filename.rsplit('/', maxsplit=1)[1][:(- 4)]
def OIE_files_to_filter(self):
"\n Tries to ensure that only the 'processed OIE files' are selected for which narrow IE extractions are found.\n "
input_files = glob.glob((self.oie_data_dir + '*.txt'))
doc_ids_for_filtering = []
with open(self.csv_path, 'r') as csv_f:
reader = csv.DictReader(csv_f)
for row in reader:
doc_ids_for_filtering.append(row['doc_id'])
doc_ids_for_filtering = list(set(doc_ids_for_filtering))
return [f for f in input_files if (self.get_docid_from_filename(f) in doc_ids_for_filtering)]
def run(self, prefix, filter_settings, output_dir, irrelevant_cluster_ids, num_clusters_to_drop=2, print_stats=False, print_clusters=False, plot=False, cluster_names=None):
"\n Script to run the filtering process.\n\n :param prefix: Name of the experiment.\n :param filter_settings: A dict with filter settings, retrieved from the settings file (e.g. SORE/SORE_settings.json).\n :param output_dir: Directory to store filtered and unfiltered extractions as json files.\n :param print_stats: Boolean - determines whether you'd like to print the filtering statistics\n :param print_clusters: Boolean - determines whether you'd like to print the clusters to get some insight\n :param plot: Boolean - determines whether you'd like to plot the clusters (by default not being used)\n :param cluster_names: A list of names for the clusters, to provide the plot with labels.\n "
sp_model_path = (self.filter_data_path + '{}_{}.model'.format(prefix, self.sp_size))
(narrowIE_phrases, narrowIE_embeddings, embedder) = self.prepare_narrowIE_embeddings(prefix, sp_model_path)
clusterer = fu.ClusterTradeOffs(self.filter_data_path, self.number_of_clusters, self.sp_size, self.stemming, self.stopwords)
km_model = clusterer.get_Kmeans_model(prefix, narrowIE_phrases, narrowIE_embeddings)
(clusters, results) = clusterer.cluster(km_model, narrowIE_phrases, narrowIE_embeddings)
clusters_to_drop = clusterer.cluster_insight(results, num_clusters_to_drop)
print('Dropping {} for size of the clusters, and {} because selected'.format(str(clusters_to_drop), str(irrelevant_cluster_ids)))
clusters_to_drop += irrelevant_cluster_ids
filterer = fu.SoreFilter(self.oie_data_dir, self.csv_path, self.IDF_path, self.subwordunit, sp_model_path, self.ELMo_weights_path, self.ELMo_options_path, filter_settings)
filterer.start_filtering(output_dir, prefix, self.number_of_clusters, narrowIE_phrases, narrowIE_embeddings, embedder, km_model, clusters_to_drop, print_stats)
if plot:
if cluster_names:
category_list = [x for x in cluster_names.values()]
else:
category_list = [x for x in range(self.number_of_clusters)]
digits_proj = TSNE(random_state=self.randomstate).fit_transform(clusters)
clusterer.palplot(digits_proj, km_model, category_list)
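# --- Instantiation sketch (not part of the original code): paths and sizes are
# placeholders. Note that __init__ calls check_for_embeddings(), which downloads
# the ELMo PubMed files if they are not already present in the embeddings directory.
combiner = NarrowIEOpenIECombiner(
    oie_data_dir='SORE/data/OpenIE/',
    IDF_path='SORE/data/filter_data/experiment1IDF.json',
    csv_path='SORE/data/narrowIE/combined_predictions.csv',
    SUBWORDUNIT=True,
    sp_size=8000,
    number_of_clusters=50,
)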
|
class PrepIDFWeights():
'\n Encapsulates the setting for preparing IDF weights.\n '
def __init__(self, prefix, input_file_dir, output_dir, SUBWORDUNIT=True, STEMMING=False, STOPWORDS=False):
'\n Initialise with desired settings.\n\n :param prefix: Experiment name.\n :param input_file_dir: Directory with files to compute the IDF weights (and SentencePiece model) for.\n :param output_dir: Directory to store the computed IDF weights (and SentencePiece model).\n :param SUBWORDUNIT: Boolean that determines whether to apply subword unit splitting\n :param STEMMING: Boolean that determines whether to apply stemming\n :param STOPWORDS: Boolean that determines whether to remove stopwords\n '
if (not output_dir.endswith('/')):
print("Select a folder as output directory, make sure to end the string with '/'")
return
self.prefix = prefix
self.output_dir = output_dir
self.input_file_dir = input_file_dir
self.STEMMING = STEMMING
self.STOPWORDS = STOPWORDS
self.SUBWORDUNIT = SUBWORDUNIT
if SUBWORDUNIT:
if (self.STEMMING or self.STOPWORDS):
print("Note: stemming/stopword-removal does not affect IDF values for subword units. This hasn't been implemented, as it seems counter-productive w.r.t. the IDF values.")
self.sp = spm.SentencePieceProcessor()
self.sp_size = 10
def new_sentencepiece_vocab(self, sp_storage_dir):
'\n Train a new SentencePiece model and vocabulary.\n\n :param sp_storage_dir: Directory to store the SentencePiece model and vocabulary, set in :class:`~FilterPrep`.\n '
input_files = glob.glob((self.input_file_dir + '*.txt'))
current_pwd = (os.getcwd() + '/')
input_paths = [(current_pwd + x) for x in input_files]
model_prefix = ((self.prefix + '_') + str(self.sp_size))
os.chdir(sp_storage_dir)
try:
spm.SentencePieceTrainer.train(input=input_paths, model_prefix=model_prefix, vocab_size=self.sp_size)
except RuntimeError:
print('The vocab size of your input documents is likely smaller than sp_size.')
raise
os.chdir(current_pwd)
def txt_files_to_corpus(self, input_file):
'\n Convert an input .txt file into a list of sentences (following the settings for subword units, stemming and stopwords),\n so it can be added to a single corpus to compute the IDF weights.\n\n :param input_file: .txt file to process\n :return: list of processed sentences\n '
if self.STOPWORDS:
list_of_stopwords = []
with open('SORE/my_utils/nltk_stopwords.txt') as f:
for line in f.readlines():
list_of_stopwords.append(line.strip())
with open(input_file) as f:
all_sentences = f.readlines()
processed_list_of_sentences = []
for sent in all_sentences:
if self.SUBWORDUNIT:
sentence = sent.encode('ascii', 'ignore')
processed_list_of_sentences += [str(vocab_idx) for vocab_idx in self.sp.EncodeAsIds(sentence)]
else:
spacy_sent = sent
if (self.STOPWORDS and (not self.STEMMING)):
spacy_sent = [t.text for t in spacy_nlp(sent)]
spacy_sent = [w for w in spacy_sent if (w not in list_of_stopwords)]
elif (self.STEMMING and (not self.STOPWORDS)):
spacy_sent = [t.text for t in spacy_nlp(sent)]
stemmed_sent = []
for w in spacy_sent:
stem = ''.join((t.stem() for t in TextBlob(w).words))
stemmed_sent.append(stem)
spacy_sent = stemmed_sent
elif (self.STEMMING and self.STOPWORDS):
spacy_sent = [t.text for t in spacy_nlp(sent)]
stemmed_sent = []
for w in spacy_sent:
if (w not in list_of_stopwords):
stem = ''.join((t.stem() for t in TextBlob(w).words))
stemmed_sent.append(stem)
spacy_sent = stemmed_sent
spacy_sent.append('\n')
processed_list_of_sentences += spacy_sent
return processed_list_of_sentences
def determine_output_name(self):
'\n Determines the output name for the IDF weight file, so it can be reused with the same settings.\n\n :return: path to the IDF weight file\n '
if self.SUBWORDUNIT:
output_name = ((self.output_dir + self.prefix) + 'IDF.json')
elif (self.STEMMING and self.STOPWORDS):
output_name = ((self.output_dir + self.prefix) + 'IDF_stemmed_no_stopwords.json')
elif self.STEMMING:
output_name = ((self.output_dir + self.prefix) + 'IDF_stemmed.json')
elif self.STOPWORDS:
output_name = ((self.output_dir + self.prefix) + 'IDF_no_stopwords.json')
else:
output_name = ((self.output_dir + self.prefix) + 'IDF.json')
return output_name
def dummy_tokenizer(self, doc):
return doc
def get_idf(self, corpus):
'\n Compute IDF values for a single corpus (list of sentences from selection of files).\n\n :param corpus: A single corpus (list of sentences)\n :return: Dict with IDF weights for all tokens found in the corpus\n '
vectorizer = TfidfVectorizer(strip_accents='unicode', use_idf=True, norm=None, smooth_idf=True, sublinear_tf=False, binary=False, stop_words=None, analyzer='word', tokenizer=self.dummy_tokenizer, lowercase=False, preprocessor=self.dummy_tokenizer, vocabulary=None)
vectorizer.fit_transform(corpus)
idf_Y = vectorizer.idf_
test_Y = dict(zip([str(x) for x in vectorizer.get_feature_names()], idf_Y))
return test_Y
def compute_IDF_weights(self, input_file_prefixes, sp_size, sp_storage_dir):
'\n Overarching function to compute or load the IDF weights, as well as train or load a SentencePiece model - based\n on the settings provided to :class:`~SORE.my_utils.PrepIDFWeights`\n\n :param input_file_prefixes: Select files to compute IDF weights for based on a possible prefixes, e.g., only compute IDF weights over files that are derived from the OA-STM corpus.\n :param sp_size: Size of the SentencePiece vocab, recommended 8k (input would be an int 8000), 16k or 32k, but this depends on the size of your dataset.\n :param sp_storage_dir: Directory to store sp model, I believe this is redundant - self.output_dir could be used.\n '
if self.SUBWORDUNIT:
self.sp_size = sp_size
sp_model_name = (self.output_dir + '{}_{}.model'.format(self.prefix, self.sp_size))
if os.path.exists(sp_model_name):
print('Loading existing sentencepiece model and vocab.')
self.sp.Load(sp_model_name)
else:
print('Making new sentencepiece model and vocab from input files.')
self.new_sentencepiece_vocab(sp_storage_dir)
self.sp.Load(sp_model_name)
corpus_list = []
input_files = []
for input_file_name_prefix in input_file_prefixes:
input_files += glob.glob(((self.input_file_dir + input_file_name_prefix) + '*.txt'))
if (len(input_files) > 1):
total = len(input_files)
for (idx, input_file) in enumerate(input_files):
print('Combining sentences into a single corpus for IDF ({}/{}); {}'.format((idx + 1), total, input_file))
corpus_list.append(self.txt_files_to_corpus(input_file))
IDF = self.get_idf(corpus_list)
output_name = self.determine_output_name()
with open(output_name, 'w') as f:
json.dump(IDF, f)
value_types = 'words'
if self.SUBWORDUNIT:
value_types = 'subword units'
print('Printing some IDF values, should be {}!'.format(value_types))
sanity_check = [x for x in IDF.keys()]
for x in sanity_check[:10]:
if self.SUBWORDUNIT:
print(self.sp.DecodeIds([int(x)]))
else:
print(x)
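# --- Usage sketch (not part of the original code): paths are placeholders and the
# input directory is expected to hold several .txt files. An 8k SentencePiece vocab
# is used, matching the recommendation in the compute_IDF_weights docstring.
prep = PrepIDFWeights(prefix='experiment1', input_file_dir='SORE/data/OpenIE/inputs/', output_dir='SORE/data/filter_data/', SUBWORDUNIT=True)
prep.compute_IDF_weights(input_file_prefixes=[''], sp_size=8000, sp_storage_dir='SORE/data/filter_data/')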
|
def clean_dict(content):
'\n Simple cleaning of the sentences found in the input files. Is called twice, during creation of\n OIE and narrowIE files.\n\n :param content: a dict containing {sent_id : sentence}\n :return content: a dict containing {sent_id : sentence}, where the sentences have been cleaned\n '
new_content = {}
new_sent = ''
new_sent_id = ''
for (sent_id, sent_) in content.items():
try:
sent = sent_['sentence'].rstrip()
sent = sent.replace('\n', ' ').replace('\t', ' ')
sent = re.sub(' +', ' ', sent)
if ((new_sent != '') and sent[0].isupper()):
new_content.update({new_sent_id[:(- 1)]: {'sentence': (new_sent + '.')}})
new_sent = ''
new_sent_id = ''
if ((sent[(- 1)] != '.') or sent.endswith('Fig.')):
new_sent += (' ' + sent)
new_sent_id += (str(sent_id) + '+')
continue
new_sent += sent
new_sent_id += str(sent_id)
new_content.update({new_sent_id: {'sentence': new_sent}})
new_sent = ''
new_sent_id = ''
except:
pass
return new_content
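# --- Tiny worked example (not part of the original code): sentences that do not end
# with '.' (or that end with 'Fig.') are merged into the following sentence and their
# ids are joined with '+'.
content = {
    0: {'sentence': 'Results are shown in Fig.'},
    1: {'sentence': '3 for all species.'},
    2: {'sentence': 'Growth rate trades off against longevity.'},
}
print(clean_dict(content))
# {'0+1': {'sentence': ' Results are shown in Fig.3 for all species.'},
#  '2': {'sentence': 'Growth rate trades off against longevity.'}}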
|
def write_sentences_to_txt_file(input_dict, output_folder):
'\n Reads the json input from a dataset file and prepares separate text files for OIE.\n\n :param input_dict: A json-file containing unprocessed papers.\n :param output_folder: Directory to write a txt file to, for each of the document IDs found in the input_dict.\n '
processed_files = []
for doc_id in input_dict:
output_name = ((output_folder + doc_id.replace('.', '_')) + '.txt')
try:
if os.path.exists(output_name):
print("{} already exists, skipping and assuming it's already been processed!".format(output_name))
else:
content = input_dict[doc_id]
content = clean_raw_input.clean_dict(content)
with open(output_name, 'w') as f:
for (sent_id, sentence) in content.items():
f.writelines('[LINE#{}] {}\n'.format(sent_id, sentence['sentence']))
processed_files.append(output_name)
print('Processed ', doc_id, 'to a separate text file for OIE')
except TypeError:
print('Something wrong at: ', doc_id)
|
def convert_doc_to_sciie_format(input_dict):
'\n Reads an unprocessed json file and prepares a list of sentences in the SciIE format\n\n :param input_dict: A dataset json-file containing unprocessed papers.\n :return: processed_sentences - a list of sentences ready to be input to a trained SciIE model\n '
processed_sentences = []
for doc_id in input_dict:
content = input_dict[doc_id]
content = clean_raw_input.clean_dict(content)
for (sent_id, sentence) in content.items():
sent_dict = {'clusters': [], 'doc_key': ((doc_id + '_') + str(sent_id))}
doc = spacy_nlp(sentence['sentence'])
sent_dict['ner'] = [[]]
sent_dict['relations'] = [[]]
sent_dict['sentences'] = [[token.text for token in doc]]
processed_sentences.append(sent_dict)
return processed_sentences
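# --- Sketch of the SciIE-style record produced per sentence (values below are
# illustrative, not taken from a real document): empty 'clusters'/'ner'/'relations'
# slots plus the tokenised sentence, keyed by '<doc_id>_<sent_id>'.
example_record = {
    'clusters': [],
    'doc_key': 'JEB_0001_3',
    'ner': [[]],
    'relations': [[]],
    'sentences': [['Growth', 'rate', 'trades', 'off', 'against', 'longevity', '.']],
}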
|
def quote_merger(doc):
matched_spans = []
matches = matcher(doc)
for (match_id, start, end) in matches:
span = doc[start:end]
matched_spans.append(span)
for span in matched_spans:
span.merge()
return doc
|
def spacy_nlp(too_be_parsed):
"\n Instantiate a Spacy nlp parser (spacy.load('en_core_web_sm', disable=['ner','tagger']), which matches a couple\n of 'trade-off' expressions as single tokens - rather than ['trade', '-', 'off'].\n\n :param too_be_parsed: Some string to be parsed, could be single or multiple sentences.\n :return: The space doc for that string, containing sentence and token information.\n "
return nlp(too_be_parsed)
|
def convert_spans_to_tokenlist(predicted_spans, corresponding_data):
'\n Converts the spans of relations found in a sentence to a list of tokens\n\n :param predicted_spans: SciIE output, formatted with span_start and span_end as token indices.\n :param corresponding_data: SciIE input file, which contains the list of tokens for each sentence.\n '
rel_c = Counter()
[relations] = predicted_spans['relation']
all_rel_arguments = []
tradeoff_arguments = []
modified_tradeoff_arguments = []
for rel in relations:
rel_c[rel[4]] += 1
if (rel[4] == 'Not_a_TradeOff'):
all_rel_arguments.append(corresponding_data['sentences'][0][rel[0]:(rel[1] + 1)])
all_rel_arguments.append(corresponding_data['sentences'][0][rel[2]:(rel[3] + 1)])
if (rel[4] == 'TradeOff'):
tradeoff_arguments.append(corresponding_data['sentences'][0][rel[0]:(rel[1] + 1)])
tradeoff_arguments.append(corresponding_data['sentences'][0][rel[2]:(rel[3] + 1)])
modified_tradeoff_arguments.append(corresponding_data['sentences'][0][rel[0]:(rel[1] + 1)])
modified_tradeoff_arguments.append(corresponding_data['sentences'][0][rel[2]:(rel[3] + 1)])
all_rel_arguments.append(corresponding_data['sentences'][0][rel[0]:(rel[1] + 1)])
all_rel_arguments.append(corresponding_data['sentences'][0][rel[2]:(rel[3] + 1)])
if (rel[4] == 'Arg_Modifier'):
arg_1 = corresponding_data['sentences'][0][rel[0]:(rel[1] + 1)]
arg_2 = corresponding_data['sentences'][0][rel[2]:(rel[3] + 1)]
if (arg_1 in modified_tradeoff_arguments):
modified_tradeoff_arguments.append((arg_1 + arg_2))
elif (arg_2 in modified_tradeoff_arguments):
modified_tradeoff_arguments.append((arg_2 + arg_1))
if (arg_1 in all_rel_arguments):
all_rel_arguments.append(arg_2)
elif (arg_2 in all_rel_arguments):
all_rel_arguments.append(arg_1)
return (all_rel_arguments, tradeoff_arguments, modified_tradeoff_arguments, rel_c)
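# --- Tiny worked example (not part of the original code). Each predicted relation is
# [arg1_start, arg1_end, arg2_start, arg2_end, label], with inclusive token indices
# into corresponding_data['sentences'][0]; the values below are illustrative.
predicted = {'relation': [[[0, 1, 5, 5, 'TradeOff']]]}
data = {'sentences': [['Growth', 'rate', 'trades', 'off', 'against', 'longevity', '.']]}
all_args, to_args, mod_to_args, counts = convert_spans_to_tokenlist(predicted, data)
print(to_args)   # [['Growth', 'rate'], ['longevity']]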
|
def simple_tokens_to_string(tokenlist):
'\n Convert a list of tokens to a string.\n\n :param tokenlist: A list of tokens from the spacy parser\n :return : A string with all tokens concatenated, simply separated by a space.\n '
return ' '.join((x for x in tokenlist if ((x != '<s>') and (x != '</s>'))))
|
def read_sciie_output_format(data_doc, predictions_doc, RELATIONS_TO_STORE):
"\n Reads the SciIE input and predictions, and prepares a list of arguments to write to a csv file. Choices for RELATIONS_TO_STORE:\n * ALL - Use all narrow IE arguments and relations found in all documents.\n * TRADEOFFS - Use all narrow IE arguments and relations found in documents where a TradeOff relation was found.\n * TRADEOFFS_AND_ARGMODS - Use only the TradeOff relations and their modifiers (in documents where a TradeOff relation was found).\n\n :param data_doc: the input data to the SciIE system.\n :param predictions_doc: the predictions from the SciIE system for the same input data.\n :param RELATIONS_TO_STORE: variable that determines which arguments to store - choice\n between 'ALL', 'TRADEOFFS', and 'TRADEOFFS_AND_ARGMODS'\n :return: `output_all_sentences` a list of rows to write to a CSV file -\n [doc_id, sent_id, RELATIONS_TO_STORE, argument_list, sentence]\n "
predicted_dicts = []
with open(predictions_doc) as o:
for line in o.read().split('\n'):
if (len(line) > 10):
predicted_dicts.append(json.loads(line))
data_dicts = []
with open(data_doc) as d:
for line in d.read().split('\n'):
if (len(line) > 10):
data_dicts.append(json.loads(line))
lines_to_write = []
all_relations_counter = Counter()
for (preds_for_sent, sent) in zip(predicted_dicts, data_dicts):
rel_args_for_sent = []
if (preds_for_sent['relation'] != [[]]):
(all_modified_args, to_args, modified_to_args, rel_counter) = convert_spans_to_tokenlist(preds_for_sent, sent)
all_relations_counter += rel_counter
(doc_id, sent_id) = preds_for_sent['doc_key'].rsplit('_', maxsplit=1)
doc_id = doc_id.replace('.', '_')
sentence = simple_tokens_to_string(sent['sentences'][0])
argument_list = []
if (RELATIONS_TO_STORE == 'ALL'):
relation_types = 'All'
argument_list = [simple_tokens_to_string(arg) for arg in all_modified_args]
if (RELATIONS_TO_STORE == 'TRADEOFFS'):
relation_types = 'All relations for documents with a TradeOff'
if (modified_to_args != []):
argument_list = [simple_tokens_to_string(arg) for arg in to_args]
argument_list += [simple_tokens_to_string(arg) for arg in modified_to_args]
if (RELATIONS_TO_STORE == 'TRADEOFFS_AND_ARGMODS'):
relation_types = 'Only TradeOffs with their Arg-Modifiers'
if ((to_args != []) and (modified_to_args == [])):
argument_list = [simple_tokens_to_string(arg) for arg in to_args]
argument_list += [simple_tokens_to_string(arg) for arg in modified_to_args]
if (argument_list != []):
rel_args_for_sent.append([doc_id, sent_id, sentence, relation_types, argument_list])
if (rel_args_for_sent != []):
lines_to_write.append(rel_args_for_sent)
print('Relations found: ', all_relations_counter.most_common())
return lines_to_write
|
def start_parsing(data, pred, output_csv, RELATIONS_TO_STORE):
'\n Start the parsing of a single set of narrow IE predictions, and write these to a temporary CSV file.\n The CSV file will be combined with others into one large CSV. Choices for RELATIONS_TO_STORE:\n * ALL - Use all narrow IE arguments and relations found in all documents.\n * TRADEOFFS - Use all narrow IE arguments and relations found in documents where a TradeOff relation was found.\n * TRADEOFFS_AND_ARGMODS - Use only the TradeOff relations and their modifiers (in documents where a TradeOff relation was found).\n\n :param data: narrowIE input.\n :param pred: narrowIE predictions.\n :param output_csv: temporary csv file name.\n :param RELATIONS_TO_STORE: Settings for which relations to store.\n :return:\n '
rows_to_write = read_sciie_output_format(data, pred, RELATIONS_TO_STORE)
with open(output_csv, 'w') as f:
writer = csv.writer(f)
writer.writerow(['doc_id', 'sentence_nr', 'sentence', 'relation_types', 'arguments'])
for rows_per_doc in rows_to_write:
for rows_per_sent in rows_per_doc:
writer.writerow(rows_per_sent)
f.close()
print('Converted the predicted ', RELATIONS_TO_STORE, ' to a csv file: ', output_csv)
|
def write_dicts_to_files(num_docs, dict_with_various_docs, input_doc, index, old_index, output_folder_OIE, output_folder_narrowIE):
'\n Call :func:`~SORE.my_utils.convert_json_article_to_SciIE.convert_doc_to_sciie_format` (and write the results) and\n :func:`~SORE.my_utils.convert_json_article_to_OIE5.write_sentences_to_txt_file`.\n\n :param num_docs: max number of input articles to group in a single narrow IE file.\n :param dict_with_various_docs: A group of num_docs articles to work on.\n :param input_doc: An input dataset path in json format, used to determine output names\n :param index: Final index of the set of articles to work on (old_index + num_docs)\n :param old_index: Starting index of current articles to work on.\n :param output_folder_OIE: output folder for OIE files, one for each doc_id\n :param output_folder_narrowIE: output folder for NarrowIE files, one for each input_file\n :return:\n '
convert_to_OIE.write_sentences_to_txt_file(dict_with_various_docs, output_folder_OIE)
if (index < (num_docs - 1)):
narrowIE_output_name = input_doc.rsplit('/', maxsplit=1)[1]
else:
narrowIE_output_name = input_doc.rsplit('/', maxsplit=1)[1].replace('.', '#{}-{}_narrowIE_input.'.format((old_index + 1), (index + 1)))
output_file_path = (output_folder_narrowIE + narrowIE_output_name)
if os.path.exists(output_file_path):
print("{} already exists, skipping and assuming it's already been processed!".format(narrowIE_output_name))
else:
output_file = open(output_file_path, 'w', encoding='utf-8')
narrowIE_inputdata = []
dict_list = convert_to_SciIE.convert_doc_to_sciie_format(dict_with_various_docs)
narrowIE_inputdata += dict_list
for dic in dict_list:
json.dump(dic, output_file)
output_file.write('\n')
print('Wrote the input for the SciIE system to: ', (output_folder_narrowIE + narrowIE_output_name))
|
def convert_documents(max_num_docs_narrowIE, input_files, output_folder_OIE, output_folder_narrowIE):
'\n Reads an unprocessed json file and prepares the input documents for narrow and open IE. Scraped\n text in JEB and BMC files is processed into a single-sentence dict:\n {"doc_id": {"sent_id": {"sentence": ...}}}\n\n :param max_num_docs_narrowIE: max number of input articles to group in a single narrow IE file\n :param input_files: list of json-files containing unprocessed papers\n :param output_folder_OIE: output folder for OIE files, one for each doc_id\n :param output_folder_narrowIE: output folder for NarrowIE files, one for each input_file\n '
for input_file in input_files:
num_docs = max_num_docs_narrowIE
print('\nCollecting sentences from (max batch size {}): {}'.format(num_docs, input_file))
with open(input_file) as f:
data = json.load(f)
i = 1
dict_with_various_docs = {}
old_index = 0
for (index, doc_id) in enumerate(tqdm(data, position=0, leave=True)):
all_sections = data[doc_id]['sections'].keys()
sections = []
for section in all_sections:
if (section.lower() == 'references'):
pass
else:
sections.append(section)
if (len(sections) < 2):
print('Dropped document {}, because it only contains the sections: {}'.format(doc_id, sections))
continue
else:
if (((index + 1) // num_docs) == i):
i += 1
write_dicts_to_files(num_docs, dict_with_various_docs, input_file, index, old_index, output_folder_OIE, output_folder_narrowIE)
dict_with_various_docs = {}
old_index = index
dict_with_various_docs[doc_id] = {}
sent_count = 0
for section in sections:
list_of_paragraphs = data[doc_id]['sections'][section]['text']
for paragraph in list_of_paragraphs:
parsed_paragraph = spacy_nlp(paragraph)
for sent in parsed_paragraph.sents:
if (len(sent.text) > 30):
dict_with_various_docs[doc_id][sent_count] = {'sentence': sent.text}
sent_count += 1
write_dicts_to_files(num_docs, dict_with_various_docs, input_file, index, old_index, output_folder_OIE, output_folder_narrowIE)
processed_file = ((input_file.rsplit('/', maxsplit=1)[0] + '/processed/') + input_file.rsplit('/', maxsplit=1)[(- 1)])
shutil.move(input_file, processed_file)
print('Processed: ', input_file.rsplit('/', maxsplit=1)[(- 1)])
print('Done preparing data for OIE and narrow IE!')
|
class OpenIE5_client(object):
'\n Encapsulates functionality to query the Open IE 5 standalone server.\n '
def __init__(self, csv_path, oie_data_dir, path_to_OIE_jar):
'\n Initialise with relevant paths.\n\n :param csv_path: The narrow IE predictions CSV file holds the document identifiers relevant for SORE filtering, rather than running OpenIE5 on all documents.\n :param oie_data_dir: The path to all OpenIE5 input .txt files.\n :param path_to_OIE_jar: The path to the OpenIE 5 standalone jar file.\n '
self.csv_path = csv_path
self.oie_data_dir = oie_data_dir
self.path_to_OIE_jar = path_to_OIE_jar
self.current_pwd = os.getcwd()
def get_docid_from_filename(self, filename, output_name=False):
'\n Find the document ID in the file_path, or the file_name itself.\n\n :param filename: Path to file\n :param output_name: Boolean - true = return filename, false = return doc ID\n :return:\n '
if output_name:
return (((self.oie_data_dir + 'processed/') + filename.rsplit('/', maxsplit=1)[1][:(- 4)]) + '_processed.txt')
return filename.rsplit('/', maxsplit=1)[1][:(- 4)]
def determine_in_and_output_files(self):
'\n Determine pairs of OIE input filepaths and corresponding OIE output filepaths.\n '
input_files = glob.glob((self.oie_data_dir + 'inputs/*.txt'))
docs_with_central_relations = []
with open(self.csv_path, 'r') as csv_f:
reader = csv.DictReader(csv_f)
for row in reader:
docs_with_central_relations.append(row['doc_id'])
docs_with_central_relations = list(set(docs_with_central_relations))
OIE_input = [f for f in input_files if (self.get_docid_from_filename(f) in docs_with_central_relations)]
output_files = [self.get_docid_from_filename(x, True) for x in OIE_input]
file_paths = []
for (idx, input_file) in enumerate(OIE_input):
file_paths.append([input_file, output_files[idx]])
return file_paths
def parse_extractions(self, dict_list):
'\n Parses the OpenIE5 json output for a single line, which has the format:\n [{\'confidence\' : x1, \'sentence\': y, \'extraction\': {\n \'arg1\': {\'text\': str},\n \'rel\' : {\'text\': str},\n \'arg2s\': {\'text\': str},\n {\'confidence\' : x2, \'sentence\': y, \'extraction: {}\' },\n etc.. ]\n The output format is a list of lines to write, e.g., "0.900 [*A*] arg1 [*R*] rel [*A*] arg2 [*A*] arg 3 etc.. context(...) negated: ..., passive: ..."\n\n :param dict_list: Json dict with OIE extractions for a sentence.\n :return: List of strings, each representing an extraction.\n '
lines_to_write = []
for tuple in dict_list:
for k in tuple['extraction'].keys():
if (k not in ['arg1', 'rel', 'arg2s', 'context', 'offset', 'negated', 'passive']):
print('Currently not handling the OIE extraction key: {}'.format(k))
pass
ex = tuple['extraction']
line_to_write = None
try:
context = ''
arg2s_string = ''
if ex['context']:
context = ex['context']['text']
for arg in ex['arg2s']:
arg2s_string += '[*A*]{}'.format(arg['text'])
line_to_write = '{:.3f}\t[*A*]{}[*R*]{}{}\tcontext({})\tnegated: {} ,passive: {}\n'.format(tuple['confidence'], ex['arg1']['text'], ex['rel']['text'], arg2s_string, context, str(ex['negated']), str(ex['passive']))
except:
pass
if line_to_write:
lines_to_write.append(line_to_write)
return lines_to_write
def get_extractions(self):
'\n Query the OpenIE 5 server running at port 8000, parse the extractions and write sentence and extractions to a file.\n '
os.chdir(self.current_pwd)
extractor = OpenIE5('http://localhost:8000')
input_output_files = self.determine_in_and_output_files()
for (input_file, output_file) in input_output_files:
if os.path.exists(output_file):
print("{} already exists, skipping and assuming it's already been processed!".format(output_file))
else:
with open(input_file, encoding='ascii', errors='ignore') as f:
lines_in_file = f.readlines()
number_of_lines = 0
number_of_lines_processed = 0
with open(output_file, 'w') as of:
for line in tqdm(lines_in_file, position=0, leave=True):
if (line == '\n'):
pass
else:
number_of_lines += 1
(sent_id, sent) = line.split('] ', maxsplit=1)
of.writelines('{}] {}\n'.format(sent_id, sent.rstrip()))
try:
extractions = extractor.extract(sent)
stuff_to_write = self.parse_extractions(extractions)
of.writelines(stuff_to_write)
number_of_lines_processed += 1
except:
print("Can't process: {}".format(line.rstrip()))
pass
sys.stderr.flush()
print('Processed {}/{} lines in {} with OpenIE5\n'.format(number_of_lines_processed, number_of_lines, input_file[:(- 4)].rsplit('/', maxsplit=1)[1]))
print('Finished processing files with OpenIE5, will now shut down server.')
def start_server(self):
'\n Asks the user to start the OpenIE5 server at port 8000, providing the command and waiting for user input to continue.\n '
print('Starting server at port 8000')
OIE_dir = self.path_to_OIE_jar.split('target')[0]
os.chdir(OIE_dir)
print('To start an OpenIE5 server copy the following line into a new terminal window and run:')
print('cd {} ; java -Xmx10g -XX:+UseConcMarkSweepGC -jar {} --ignore-errors --httpPort 8000\n'.format(OIE_dir, self.path_to_OIE_jar))
def stop_server(self):
'\n Once all Open IE extractions have been collected and written to files, the server is shut down.\n '
try:
os.system((("kill -9 `ps aux | grep 'java -Xmx10g -XX:+UseConcMarkSweepGC -jar " + self.path_to_OIE_jar) + " --ignore-errors --httpPort 8000'| grep -v grep | awk '{print $2; exit}'`"))
print('Stopped the server')
except:
print('Error shutting down a pre-existing OpenIE5 server at port 8000')
|
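# Minimal sketch of what parse_extractions produces for one (made-up) OpenIE5 extraction dict.
# The constructor arguments below are dummy paths; __init__ only stores them, so nothing is
# read from disk or queried from a server here.
_client = OpenIE5_client('predictions.csv', 'SORE/data/OpenIE/', 'openie-assembly.jar')
_mock = [{'confidence': 0.93, 'sentence': 'Higher speed comes at the cost of accuracy .',
'extraction': {'arg1': {'text': 'Higher speed'}, 'rel': {'text': 'comes at'},
'arg2s': [{'text': 'the cost of accuracy'}], 'context': None, 'negated': False, 'passive': False}}]
print(_client.parse_extractions(_mock)[0])
# -> '0.930\t[*A*]Higher speed[*R*]comes at[*A*]the cost of accuracy\tcontext()\tnegated: False ,passive: False\n'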
def run_OpenIE_5(csv_path, path_to_OIE_jar=None, unprocessed_paths='SORE/data/OpenIE/'):
"\n To run OpenIE5 a local server has to be started and queried. Not sure if python's GIL allows running these from a single script.\n\n :param csv_path: Path to the CSV with narrow IE predictions - only documents with extractions in the CSV will be fed to OIE.\n :param path_to_OIE_jar: Path to the OpenIE5 jar file - can be adjusted in the settings file.\n :param unprocessed_paths: Path where Open IE input files are written during data preparation.\n "
current_pwd = (os.getcwd() + '/')
if (path_to_OIE_jar is None):
print('Change `path_to_OIE_jar` to the OpenIE 5 jar you have to assemble!')
return
client = OpenIE5_client(csv_path, unprocessed_paths, path_to_OIE_jar)
client.start_server()
answer = input('Wait until the server is running to continue! Is the server ready? (y, n): ').lower()
if (answer == 'y'):
client.get_extractions()
elif (answer == 'n'):
pass
client.stop_server()
os.chdir(current_pwd)
|
class GithubURLDomain(Domain):
'\n Resolve certain links in markdown files to github source.\n '
name = 'githuburl'
ROOT = 'https://github.com/tensorpack/tensorpack/blob/master/'
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
github_url = None
if ('.html' not in target):
if (target.startswith('../../') and (not target.startswith('../../modules'))):
url = target.replace('../', '')
github_url = url
if (github_url is not None):
if github_url.endswith('README'):
github_url += '.md'
print('Ref {} resolved to github:{}'.format(target, github_url))
contnode['refuri'] = (self.ROOT + github_url)
return [('githuburl:any', contnode)]
else:
return []
|
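# Illustration of the rewrite rule in resolve_any_xref with a hypothetical markdown target;
# only string handling is involved, assuming the GithubURLDomain class above is importable
# (i.e. Sphinx is installed).
_target = '../../examples/README'
_url = _target.replace('../', '')
if _url.endswith('README'):
_url += '.md'
print(GithubURLDomain.ROOT + _url)
# https://github.com/tensorpack/tensorpack/blob/master/examples/README.md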
def setup(app):
from recommonmark.transform import AutoStructify
app.add_config_value('recommonmark_config', {'enable_auto_toc_tree': True}, True)
app.add_transform(AutoStructify)
app.add_domain(GithubURLDomain)
|
class DataPreparation():
'\n Encapsulates the path to the input dataset, as well as paths to write files that can be processed\n by narrow IE and Open IE systems.\n '
def __init__(self, unprocessed_data_path='SORE/data/unprocessed_data/', output_folder_narrowIE='SORE/data/narrowIE/input/', output_folder_OIE='SORE/data/OpenIE/inputs/'):
'\n Construct DataPreparation object. There are no checks in place to see whether the provided paths exist.\n\n :param unprocessed_data_path: The path to the unprocessed input dataset (for an example of the format used\n check out the provided OA-STM dataset).\n :param output_folder_narrowIE: The path to write files that can be used as narrow IE input (SciIE input format).\n :param output_folder_OIE: The path to write files that can be used as Open IE input (OpenIE 5 input format).\n '
self.unprocessed_data_path = unprocessed_data_path
self.output_folder_narrowIE = output_folder_narrowIE
self.output_folder_OIE = output_folder_OIE
def start(self, max_num_docs_narrowIE):
'\n Start preparing the data for narrow and open IE, includes a simple pre-processing step in\n :func:`~SORE.my_utils.clean_content`.\n\n :param max_num_docs_narrowIE: Maximum nr of documents to process at once with SciIE. This influences the time to pre-compute the ELMo embeddings for a large dataset (You should consider using a different model!).\n '
input_files = glob.glob((self.unprocessed_data_path + '*.json'))
prepare_data.convert_documents(max_num_docs_narrowIE, input_files, self.output_folder_OIE, self.output_folder_narrowIE)
|
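# Hypothetical usage sketch; the default paths match the constructor defaults above and must
# exist on disk (together with the prepare_data helper imported in this module) before calling start().
prep = DataPreparation(unprocessed_data_path='SORE/data/unprocessed_data/')
prep.start(max_num_docs_narrowIE=100)  # writes the narrow IE and Open IE input files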
class NarrowIEParser():
'\n Encapsulates the path and settings for the use of narrow IE predictions.\n The RELATIONS_TO_STORE parameter determines which narrow IE extractions you use for clustering.\n * ALL - Use all narrow IE arguments and relations found in all documents.\n * TRADEOFFS - Use all narrow IE arguments and relations found in documents where a TradeOff relation was found.\n * TRADEOFFS_AND_ARGMODS - Use only the TradeOff relations and their modifiers (in documents where a TradeOff relation was found).\n '
def __init__(self, narrowIE_data_dir='SORE/data/narrowIE/input/', narrowIE_predictions_dir='SORE/data/narrowIE/predictions/', RELATIONS_TO_STORE='TRADEOFFS_AND_ARGMODS', output_csv_path='SORE/data/narrowIE/'):
"\n Construct NarrowIEParser object. There are no checks in place to see whether the provided paths exist.\n\n :param narrowIE_data_dir: The path to where narrow IE input files were written by :class:`DataPreparation`.\n :param narrowIE_predictions_dir: The path to a directory where predictions can be found, corresponding to the input files.\n :param RELATIONS_TO_STORE: Which relations you'd like to use during clustering and filtering.\n :param output_csv_path: Path to store a single CSV file that store the narrow IE predictions for clustering and filtering.\n "
self.narrowIE_data = narrowIE_data_dir
self.narrowIE_predictions = narrowIE_predictions_dir
self.relations_to_store = RELATIONS_TO_STORE
self.output_csv_path = output_csv_path
def start(self, input_filename, predictions_filename, output_csv):
'\n Iterates over the files prepared by :class:`DataPreparation` (each capped to a maximum number of documents).\n Converts the predictions output by the SciIE model to a CSV format for clustering. Predictions have the format:\n\n {"doc_key": "XXX",\n "ner": [[[8, 8, "Generic"], [10, 10, "Generic"], [12, 14, "Generic"]]],\n "relation": [[[8, 8, 10, 10, "Not_a_TradeOff"], [8, 8, 12, 14, "Not_a_TradeOff"]]]}\n\n :param input_filename: Single file with narrow IE input, to retrieve the sentence string.\n :param predictions_filename: Single file with narrow IE predictions.\n :param output_csv: CSV file to temporarily store the relations found in predictions_filename.\n '
parse_narrowIE_output.start_parsing((self.narrowIE_data + input_filename), (self.narrowIE_predictions + predictions_filename), output_csv, self.relations_to_store)
|
class FilterPrep():
'\n Encapsulates the paths to all Open IE input files. Note that all .txt files prepared for Open IE will be used when training\n a SentencePiece model.\n '
def __init__(self, input_file_dir='SORE/data/OpenIE/inputs/', output_dir='SORE/data/filter_data/'):
'\n Initialise FilterPrep with a directory to store the subword model and IDF weights for re-use.\n\n :param input_file_dir: Directory with .txt files to process. Expecting one sentence per line, for OpenIE, for\n SentencePiece (currently runs on ALL input .txt files) and for the creation of a single, large IDF corpus\n (takes into account prefixes that can be provided in the settings file).\n :param output_dir: Directory to store IDF weights and SentencePiece model & vocab.\n '
self.input_file_dir = input_file_dir
self.output_dir = output_dir
def determine_output_name(self, prefix='test', SUBWORDUNIT=True, STEMMING=False, STOPWORDS=False):
'\n Determines the output name for the IDF weights and SentencePiece model & vocab, so they can be re-used.\n\n :param prefix: Experiment name.\n :param SUBWORDUNIT: Boolean that determines whether to apply subword unit splitting\n :param STEMMING: Boolean that determines whether to apply stemming\n :param STOPWORDS: Boolean that determines whether to remove stopwords\n :return: Path to the IDF weights file for these settings.\n '
if SUBWORDUNIT:
output_name = ((self.output_dir + prefix) + 'IDF.json')
elif (STEMMING and STOPWORDS):
output_name = ((self.output_dir + prefix) + 'IDF_stemmed_no_stopwords.json')
elif STEMMING:
output_name = ((self.output_dir + prefix) + 'IDF_stemmed.json')
elif STOPWORDS:
output_name = ((self.output_dir + prefix) + 'IDF_no_stopwords.json')
else:
output_name = ((self.output_dir + prefix) + 'IDF.json')
return output_name
def start(self, prefix='test', file_names='OA-STM', sp_size='8k', SUBWORDUNIT=True, STEMMING=False, STOPWORDS=False):
'\n Start preparing the IDF weights (and possibly the SentencePiece vocab and model).\n\n :param prefix: Experiment name.\n :param file_names: Prefixes of the files to compute the IDF weights for.\n :param sp_size: string that determines the name of the sentencepiece vocabulary\n :param SUBWORDUNIT: Boolean that determines whether to apply subword unit splitting\n :param STEMMING: Boolean that determines whether to apply stemming\n :param STOPWORDS: Boolean that determines whether to remove stopwords\n '
if (self.input_file_dir == 'SORE/data/OpenIE/inputs/'):
print('Compute IDF weights (and subword model) for all documents in the folder `SORE/data/OpenIE/inputs/`', ' that start with one of {}'.format(str(file_names)))
answer = input('Continue? (y, n): ').lower()
if (answer == 'y'):
pass
else:
return
IDF_computer = IDF_weight_utils.PrepIDFWeights(prefix, self.input_file_dir, self.output_dir, SUBWORDUNIT, STEMMING, STOPWORDS)
IDF_computer.compute_IDF_weights(file_names, sp_size, self.output_dir)
|
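# Sketch of how the re-usable IDF-weights filename is derived from the preprocessing flags
# (pure string handling, no files are created; the prefix value is made up):
prepper = FilterPrep(output_dir='SORE/data/filter_data/')
print(prepper.determine_output_name(prefix='demo_', SUBWORDUNIT=False, STEMMING=True, STOPWORDS=True))
# SORE/data/filter_data/demo_IDF_stemmed_no_stopwords.json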
class SORE_filter():
'\n Encapsulates paths to all processed Open IE files and the processed narrow IE CSV file.\n '
def __init__(self, csv_path='data/narrowIE/tradeoffs_and_argmods.csv', sore_output_dir='SORE/data/processed_data/'):
'\n Initialise with the narrow IE predictions CSV and an output directory for the filtered extractions.\n\n :param csv_path: Path to the CSV file with the combined narrow IE predictions.\n :param sore_output_dir: Directory to write the SORE-filtered Open IE extractions to.\n '
self.csv_path = csv_path
self.sore_output_dir = sore_output_dir
def start(self, prefix, filter_settings, IDF_weights_path, SUBWORDUNIT, irrelevant_cluster_ids, oie_data_dir='SORE/data/OpenIE/processed/', sp_size=16000, number_of_clusters=50, num_clusters_to_drop=2, stemming=False, stopwords=True, SUBWORD_UNIT_COMBINATION='avg', print_stats=False, path_to_embeddings=None):
'\n Starts the SORE filtering process. This includes embedding all narrow IE arguments for clustering, as well as\n embedding all Open IE arguments to cluster with the same model, enabling cluster- and phrase-similarity checking.\n\n :param prefix: A name to identify the created files that store IDF weights, sentencepiece model and vocab, etc.\n :param filter_settings: A dict with filter settings, retrieved from the settings file (e.g. SORE/SORE_settings.json).\n :param IDF_weights_path: Path to the IDF weights created during filter preparation.\n :param SUBWORDUNIT: Boolean value that indicates whether subwordunits have been used during IDF weight creation.\n :param irrelevant_cluster_ids: Ids of clusters considered irrelevant, which are dropped during filtering.\n :param oie_data_dir: Directory with the processed Open IE extraction files.\n :param sp_size: Size of vocab used for sentencepiece subwordunits.\n :param number_of_clusters: Number of clusters used when clustering the narrow IE arguments.\n :param num_clusters_to_drop: Number of largest clusters to drop before filtering.\n :param stemming: Boolean that determines whether keyphrases are stemmed before filtering.\n :param stopwords: Boolean that determines whether stopwords are removed from keyphrases before filtering.\n :param SUBWORD_UNIT_COMBINATION: How the weights for subwordunits are combined to a single weight per word.\n :param print_stats: Whether to print the statistics on unfiltered OIE extractions.\n :param path_to_embeddings: Path where ELMo PubMed embeddings can be found.\n '
filter = filterOIE_with_narrowIE.NarrowIEOpenIECombiner(oie_data_dir, IDF_weights_path, self.csv_path, SUBWORDUNIT, sp_size, number_of_clusters, stemming, stopwords, SUBWORD_UNIT_COMBINATION, path_to_embeddings)
filter.run(prefix, filter_settings, self.sore_output_dir, irrelevant_cluster_ids, num_clusters_to_drop, print_stats, print_clusters=True, plot=False, cluster_names=None)
|
def main(all_settings):
'\n Run Semi-Open Relation Extraction code following the provided settings file.\n\n :param all_settings: Settings-file provided - e.g. python run_SORE.py -s "path_to_my_settings_file.json"\n '
oie_data_dir = 'SORE/data/OpenIE/processed/'
sore_output_dir = 'SORE/data/processed_data/'
brat_output_dir = 'SORE/data/brat_annotations/'
prep = all_settings['Prepare_data']
parse_narrow = all_settings['Parse_narrowIE_predictions']
runOIE = all_settings['Run_OIE']
filter = all_settings['Filter_OIE']
max_num_docs_narrowIE = all_settings['data_prep']['max_num_docs_narrowIE']
narrowIE_input_files = all_settings['narrowIE']['narrowIE_input_files']
RELATIONS_TO_STORE = all_settings['narrowIE']['RELATIONS_TO_STORE']
path_to_OIE_jar = all_settings['OpenIE']['path_to_OIE_jar']
sp_size = all_settings['Filtering']['sp_size']
SUBWORDUNIT = all_settings['Filtering']['SUBWORDUNIT']
STEMMING = all_settings['Filtering']['STEMMING']
STOPWORDS = all_settings['Filtering']['STOPWORDS']
prefix = all_settings['Filtering']['prefix']
file_names = all_settings['Filtering']['file_names']
number_of_clusters = all_settings['Filtering']['number_of_clusters']
num_clusters_to_drop = all_settings['Filtering']['num_largest_clusters_to_drop']
irrelevant_cluster_ids = all_settings['Filtering']['irrelevant_cluster_ids']
SUBWORD_UNIT_COMBINATION = all_settings['Filtering']['SUBWORD_UNIT_COMBINATION']
print_stats = all_settings['Filtering']['print_stats']
filter_settings = all_settings['Filtering']['filter_settings']
convert_back_to_BRAT = all_settings['convert_back_to_BRAT']
if prep:
prep_obj = DataPreparation()
prep_obj.start(max_num_docs_narrowIE)
suffix_options = ['ALL', 'TRADEOFFS', 'TRADEOFFS_AND_ARGMODS']
suffixes = ['_all_arguments.csv', '_tradeoffs.csv', '_tradeoffs_and_argmods.csv']
output_suffix = suffixes[suffix_options.index(RELATIONS_TO_STORE)]
narrowIE_parser = NarrowIEParser(RELATIONS_TO_STORE=RELATIONS_TO_STORE)
combined_name = ((narrowIE_parser.output_csv_path + prefix) + output_suffix)
if parse_narrow:
parse_files = [[input_filename, ('predictions_' + input_filename)] for input_filename in narrowIE_input_files]
csv_files = []
for (input_filename, predictions_filename) in parse_files:
output_csv = ((narrowIE_parser.output_csv_path + input_filename.rsplit('.', maxsplit=1)[0]) + output_suffix)
narrowIE_parser.start(input_filename, predictions_filename, output_csv)
csv_files.append(output_csv)
combined_csv = pd.concat([pd.read_csv(f, engine='python') for f in csv_files])
combined_csv.to_csv(combined_name, index=False, encoding='utf-8')
print('Written all predictions to {}.'.format(combined_name))
for file_to_remove in csv_files:
os.remove(file_to_remove)
if runOIE:
run_OIE5.run_OpenIE_5(combined_name, path_to_OIE_jar)
prepper = FilterPrep()
my_SORE_filter = SORE_filter(combined_name, sore_output_dir)
if filter:
IDF_weights_path = prepper.determine_output_name(prefix, SUBWORDUNIT, STEMMING, STOPWORDS)
if os.path.exists(IDF_weights_path):
print('Assuming IDF weights and sentencepiece model exist, since path exists: {} '.format(IDF_weights_path))
else:
prepper.start(prefix, file_names, sp_size, SUBWORDUNIT, STEMMING, STOPWORDS)
my_SORE_filter.start(prefix, filter_settings, IDF_weights_path, SUBWORDUNIT, irrelevant_cluster_ids, oie_data_dir, sp_size, number_of_clusters, num_clusters_to_drop, STEMMING, STOPWORDS, SUBWORD_UNIT_COMBINATION, print_stats)
if convert_back_to_BRAT:
dataset_paths = []
for input_data_prefix in file_names:
dataset_paths += glob.glob('SORE/data/unprocessed_data/processed/{}*.json'.format(input_data_prefix))
converter = SORE_to_BRAT.BratConverter(dataset_paths, combined_name, sore_output_dir, brat_output_dir)
converter.convert_to_BRAT(prefix)
|
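# Minimal sketch of the settings structure main() reads; every key below is referenced in
# main(), but the values are illustrative placeholders, not the shipped SORE_settings.json.
example_settings = {
'Prepare_data': True,
'Parse_narrowIE_predictions': True,
'Run_OIE': False,
'Filter_OIE': True,
'convert_back_to_BRAT': False,
'data_prep': {'max_num_docs_narrowIE': 100},
'narrowIE': {'narrowIE_input_files': ['my_input_file.json'], 'RELATIONS_TO_STORE': 'TRADEOFFS_AND_ARGMODS'},
'OpenIE': {'path_to_OIE_jar': None},
'Filtering': {'prefix': 'demo_', 'file_names': ['OA-STM'], 'sp_size': '8k', 'SUBWORDUNIT': True,
'STEMMING': False, 'STOPWORDS': False, 'number_of_clusters': 50, 'num_largest_clusters_to_drop': 2,
'irrelevant_cluster_ids': [], 'SUBWORD_UNIT_COMBINATION': 'avg', 'print_stats': False, 'filter_settings': {}}}
# main(example_settings)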
def standalone_TradeoffWordSplitter():
nlp = spacy.load('en_core_web_sm')
matcher = Matcher(nlp.vocab)
matcher.add('trade-off', None, [{'ORTH': 'trade'}, {'ORTH': '-'}, {'ORTH': 'off'}])
matcher.add('trade-offs', None, [{'ORTH': 'trade'}, {'ORTH': '-'}, {'ORTH': 'offs'}])
matcher.add('Trade-off', None, [{'ORTH': 'Trade'}, {'ORTH': '-'}, {'ORTH': 'off'}])
matcher.add('Trade-offs', None, [{'ORTH': 'Trade'}, {'ORTH': '-'}, {'ORTH': 'offs'}])
matcher.add('Trade-Off', None, [{'ORTH': 'Trade'}, {'ORTH': '-'}, {'ORTH': 'Off'}])
matcher.add('Trade-Offs', None, [{'ORTH': 'Trade'}, {'ORTH': '-'}, {'ORTH': 'Offs'}])
matcher.add('parentheses', None, [{'ORTH': '('}, {}, {'ORTH': ')'}])
matcher.add('<s>', None, [{'ORTH': '<'}, {'ORTH': 's'}, {'ORTH': '>'}])
matcher.add('</s>', None, [{'ORTH': '<'}, {'ORTH': '/s'}, {'ORTH': '>'}])
def quote_merger(doc):
matched_spans = []
matches = matcher(doc)
for (match_id, start, end) in matches:
span = doc[start:end]
matched_spans.append(span)
for span in matched_spans:
span.merge()
return doc
nlp.add_pipe(quote_merger, first=True)
return nlp
|
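# Quick tokenisation check (assumes spaCy 2.x, where Span.merge() still exists, plus the
# en_core_web_sm model): the matcher patterns above glue 'trade', '-', 'off' back together.
nlp = standalone_TradeoffWordSplitter()
doc = nlp('There is a trade-off between speed and accuracy .')
print([t.text for t in doc])
# e.g. ['There', 'is', 'a', 'trade-off', 'between', 'speed', 'and', 'accuracy', '.']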
def get_annotations_from_ann_file(nlp, sentence, ann_file):
'\n Parses the annotations from a BRAT .ann file into event, span, label and argument-modifier buffers,\n then assembles them into our json format.\n '
event_buffer = {}
span_buffer = {}
label_buffer = {}
argmod_buffer = {}
with open(ann_file) as f:
lines = f.readlines()
for (idx, line) in enumerate(lines):
if line.startswith('E'):
(tradeoff_id, spans_and_slots) = line.rstrip().split('\t')
event_buffer[tradeoff_id] = {}
slots = spans_and_slots.split(' ')
for (idx, slot) in enumerate(slots):
(ann_name, ann_id) = slot.split(':')
event_buffer[tradeoff_id][ann_name] = ann_id
elif line.startswith('T'):
(span_id, span_and_type, text) = line.rstrip().split('\t')
(_, span_start, span_end) = span_and_type.split(' ')
span_buffer[span_id] = {'span_id': span_id, 'span_start': span_start, 'span_end': span_end, 'text': text}
elif line.startswith('A'):
(label_id, labeltype_tradeoff_id_label_value) = line.rstrip().split('\t')
label_split = labeltype_tradeoff_id_label_value.split(' ')
label_type = ''
tradeoff_id = ''
label_value = ''
if (len(label_split) == 1):
print(label_split)
for (idx, l) in enumerate(label_split):
if (idx == 0):
label_type = l
elif (idx == 1):
tradeoff_id = l
elif (idx == 2):
label_value = l
if ((label_type == 'Confidence') and (label_value == '')):
label_value = 'Low'
if (tradeoff_id not in label_buffer):
label_buffer[tradeoff_id] = {label_type: label_value}
else:
label_buffer[tradeoff_id][label_type] = label_value
elif line.startswith('R'):
(arg_mod_id, spans_and_slots) = line.rstrip().split('\t')
(_, modifier, modified_arg) = spans_and_slots.split(' ')
(mod_name, mod_id) = modifier.split(':')
(modified_name, modified_id) = modified_arg.split(':')
argmod_buffer[arg_mod_id] = {mod_name: mod_id, modified_name: modified_id}
doc = nlp(sentence)
for token in doc:
for span in span_buffer:
start = int(span_buffer[span]['span_start'])
end = int(span_buffer[span]['span_end'])
if (token.idx == start):
span_buffer[span]['span_start'] = str(token.i)
if ((token.idx + len(token.text)) == end):
span_buffer[span]['span_end'] = str(token.i)
annotations = {'tradeoffs': {}, 'modifiers': {}}
for (indicator_id, tradeoff_tuple) in event_buffer.items():
annotations['tradeoffs'][indicator_id] = {'labels': label_buffer[indicator_id]}
for (tradeoff_part, span_id) in tradeoff_tuple.items():
annotations['tradeoffs'][indicator_id][tradeoff_part] = span_buffer[span_id]
for (modifier_id, modifier_tuple) in argmod_buffer.items():
annotations['modifiers'][modifier_id] = {}
for (arg_name, span_id) in modifier_tuple.items():
annotations['modifiers'][modifier_id][arg_name] = span_buffer[span_id]
for (mod_id, modifier_args) in annotations['modifiers'].items():
if (len(modifier_args) > 2):
print('Mod_args > 2:', modifier_args)
return annotations
|
def main():
nlp = standalone_TradeoffWordSplitter()
all_files = [d for d in os.listdir(ann_dir)]
data = {}
for filename in all_files:
if filename.endswith('.txt'):
(no_extension, _) = filename.rsplit('.', maxsplit=1)
(document_name, to_nr) = no_extension.rsplit('_', maxsplit=1)
sentence = ''
with open((ann_dir + filename), 'r') as f:
sentence = f.read()
if (document_name in data):
data[document_name][to_nr] = {'sentence': sentence}
else:
data[document_name] = {to_nr: {'sentence': sentence}}
annotations = get_annotations_from_ann_file(nlp, sentence, ((ann_dir + filename[:(- 3)]) + 'ann'))
data[document_name][to_nr]['annotations'] = annotations
json.dump(data, open('dataset.json', 'w'), indent=4, sort_keys=True)
|
class Scorer(object):
def __init__(self, metric):
self.precision_numerator = 0
self.precision_denominator = 0
self.recall_numerator = 0
self.recall_denominator = 0
self.metric = metric
self.num_labels = 0
def update(self, gold, predicted, labeltype=None):
(p_num, p_den, r_num, r_den) = self.metric(self, gold, predicted, labeltype)
self.precision_numerator += p_num
self.precision_denominator += p_den
self.recall_numerator += r_num
self.recall_denominator += r_den
def get_f1(self):
precision = (0 if (self.precision_denominator == 0) else (self.precision_numerator / float(self.precision_denominator)))
recall = (0 if (self.recall_denominator == 0) else (self.recall_numerator / float(self.recall_denominator)))
return (0 if ((precision + recall) == 0) else (((2 * precision) * recall) / (precision + recall)))
def get_recall(self):
if (self.recall_numerator == 0):
return 0
else:
return (self.recall_numerator / float(self.recall_denominator))
def get_precision(self):
if (self.precision_numerator == 0):
return 0
else:
return (self.precision_numerator / float(self.precision_denominator))
def get_prf(self):
return (self.get_precision(), self.get_recall(), self.get_f1(), self.num_labels)
def f1_score(self, gold_labels, pred_labels, labeltype=None):
'\n ◮ Precision: (# spans correctly assigned ~ p_num) TP / (# spans assigned ~ p_den) TP+FP\n ◮ Recall: (# spans correctly assigned ~ r_num) TP / (total # of spans ~ r_den) TP+FN\n '
(p_num, p_den, r_num) = (0, 0, 0)
if labeltype:
gold_labels_ = [g for g in gold_labels if (g[(- 1)] == labeltype)]
pred_labels_ = [p for p in pred_labels if (p[(- 1)] == labeltype)]
else:
gold_labels_ = gold_labels
pred_labels_ = pred_labels
r_den = len(gold_labels_)
self.num_labels += r_den
for span in pred_labels_:
p_den += 1
if (len(span) > 3):
(arg1_s, arg1_e, arg2_s, arg2_e, rel) = span
order_1 = [arg1_s, arg1_e, arg2_s, arg2_e, rel]
order_2 = [arg2_s, arg2_e, arg1_s, arg1_e, rel]
if ((order_1 in gold_labels_) or (order_2 in gold_labels_)):
p_num += 1
r_num += 1
elif (span in gold_labels_):
p_num += 1
r_num += 1
return (p_num, p_den, r_num, r_den)
|
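# Toy check of the scorer; 5-tuples are [arg1_start, arg1_end, arg2_start, arg2_end, rel_type]
# and a reversed argument order counts as a match, per f1_score above.
gold = [[0, 1, 3, 5, 'TradeOff'], [3, 5, 7, 8, 'Arg_Modifier']]
pred = [[3, 5, 0, 1, 'TradeOff'], [3, 5, 7, 9, 'Arg_Modifier']]
scorer = Scorer(Scorer.f1_score)
scorer.update(gold, pred)
print(scorer.get_prf())  # (0.5, 0.5, 0.5, 2): the reversed TradeOff matches, the Arg_Modifier does not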
def compare_against_gold(gold_input, pred_input):
gold_annotations = []
with open(gold_input) as o:
for line in o.read().split('\n'):
if (len(line) > 10):
gold_annotations.append(json.loads(line))
predicted_annotations = []
with open(pred_input) as o:
for line in o.read().split('\n'):
if (len(line) > 10):
predicted_annotations.append(json.loads(line))
span_scorer = Scorer(Scorer.f1_score)
triple_scorer = Scorer(Scorer.f1_score)
tradeoff_scorer = Scorer(Scorer.f1_score)
notatradeoff_scorer = Scorer(Scorer.f1_score)
argmodifier_scorer = Scorer(Scorer.f1_score)
for (g_ann, p_ann) in zip(gold_annotations, predicted_annotations):
if (g_ann['doc_key'] != p_ann['doc_key']):
print('Doc_key mismatch. Are you using the right combination of gold annotations and predictions?')
break
else:
span_scorer.update(g_ann['ner'][0], p_ann['ner'][0])
triple_scorer.update(g_ann['relations'][0], p_ann['relation'][0])
tradeoff_scorer.update(g_ann['relations'][0], p_ann['relation'][0], 'TradeOff')
notatradeoff_scorer.update(g_ann['relations'][0], p_ann['relation'][0], 'Not_a_TradeOff')
argmodifier_scorer.update(g_ann['relations'][0], p_ann['relation'][0], 'Arg_Modifier')
(span_p, span_r, span_f1, total_spans) = span_scorer.get_prf()
(triple_p, triple_r, triple_f1, total_tuples) = triple_scorer.get_prf()
(tradeoff_p, tradeoff_r, tradeoff_f1, total_tradeoffs) = tradeoff_scorer.get_prf()
(notatradeoff_p, notatradeoff_r, notatradeoff_f1, total_notatradeoffs) = notatradeoff_scorer.get_prf()
(argmodifier_p, argmodifier_r, argmodifier_f1, total_argmodifiers) = argmodifier_scorer.get_prf()
print('Predictions for {}'.format(gold_input))
print('\t\t Prec.\t Rec.\t F1 \t #total')
print('Spans: \t {:.2f} \t{:.2f}\t{:.2f}\t{}'.format((span_p * 100), (span_r * 100), (span_f1 * 100), total_spans))
print("Rel's: \t {:.2f} \t{:.2f}\t{:.2f}\t{}".format((triple_p * 100), (triple_r * 100), (triple_f1 * 100), total_tuples))
print('\t\t Prec.\t Rec.\t F1 \t #total')
print("TO's: \t {:.2f} \t{:.2f}\t{:.2f}\t{}".format((tradeoff_p * 100), (tradeoff_r * 100), (tradeoff_f1 * 100), total_tradeoffs))
print("NoTO's:\t {:.2f} \t{:.2f}\t{:.2f}\t{}".format((notatradeoff_p * 100), (notatradeoff_r * 100), (notatradeoff_f1 * 100), total_notatradeoffs))
print("ArgM's:\t {:.2f} \t{:.2f}\t{:.2f}\t{}".format((argmodifier_p * 100), (argmodifier_r * 100), (argmodifier_f1 * 100), total_argmodifiers))
|
def standalone_TradeoffWordSplitter():
nlp = spacy.load('en_core_web_sm')
matcher = Matcher(nlp.vocab)
matcher.add('trade-off', None, [{'ORTH': 'trade'}, {'ORTH': '-'}, {'ORTH': 'off'}])
matcher.add('trade-offs', None, [{'ORTH': 'trade'}, {'ORTH': '-'}, {'ORTH': 'offs'}])
matcher.add('Trade-off', None, [{'ORTH': 'Trade'}, {'ORTH': '-'}, {'ORTH': 'off'}])
matcher.add('Trade-offs', None, [{'ORTH': 'Trade'}, {'ORTH': '-'}, {'ORTH': 'offs'}])
matcher.add('Trade-Off', None, [{'ORTH': 'Trade'}, {'ORTH': '-'}, {'ORTH': 'Off'}])
matcher.add('Trade-Offs', None, [{'ORTH': 'Trade'}, {'ORTH': '-'}, {'ORTH': 'Offs'}])
matcher.add('parentheses', None, [{'ORTH': '('}, {}, {'ORTH': ')'}])
def quote_merger(doc):
matched_spans = []
matches = matcher(doc)
for (match_id, start, end) in matches:
span = doc[start:end]
matched_spans.append(span)
for span in matched_spans:
span.merge()
return doc
nlp.add_pipe(quote_merger, first=True)
return nlp
|
def convert_dataset_to_SCIIE(nlp, dataset):
'"\n Convert dataset to required format for SciIE:\n line1 { "clusters": [],\n "sentences": [["List", "of", "some", "tokens", "."]],\n "ner": [[[4, 4, "Generic"]]],\n "relations": [[[4, 4, 6, 17, "Tradeoff"]]],\n "doc_key": "XXX"}\n line2 { " ...\n '
with open(dataset) as f:
data = json.load(f)
converted_dataset = []
doc_count = 0
for source_doc_id in tqdm(data):
doc_count += 1
for sentence_id in data[source_doc_id]:
sent_dict = {'clusters': [], 'doc_key': ((source_doc_id + '_') + sentence_id)}
ner = []
relations = []
sentence = data[source_doc_id][sentence_id]['sentence']
doc = nlp(sentence)
tradeoffs = data[source_doc_id][sentence_id]['annotations']['tradeoffs']
modifiers = data[source_doc_id][sentence_id]['annotations']['modifiers']
unique_arg_spans = []
unique_tuples = []
for tradeoff_id in tradeoffs.keys():
predicate_start = int(tradeoffs[tradeoff_id]['TO_indicator']['span_start'])
predicate_end = int(tradeoffs[tradeoff_id]['TO_indicator']['span_end'])
relation_type = 'TradeOff'
if ('Negation' in tradeoffs[tradeoff_id]['labels']):
relation_type = 'Not_a_TradeOff'
arguments = ((key, value) for (key, value) in tradeoffs[tradeoff_id].items() if key.startswith('Arg'))
indicator_span = [predicate_start, predicate_end, 'trigger']
ner.append(indicator_span)
for (arg_id, arg) in arguments:
arg_span = [int(arg['span_start']), int(arg['span_end']), 'argument']
if (arg_span not in unique_arg_spans):
ner.append(arg_span)
unique_arg_spans.append(arg_span)
tuple = [indicator_span[0], indicator_span[1], arg_span[0], arg_span[1], relation_type]
if (tuple not in unique_tuples):
unique_tuples.append(tuple)
relations.append(tuple)
relation_type = 'Arg_Modifier'
for (mod_id, modifier) in modifiers.items():
mod_arg1_s = int(modifiers[mod_id]['Arg0']['span_start'])
mod_arg1_e = int(modifiers[mod_id]['Arg0']['span_end'])
mod_arg2_s = int(modifiers[mod_id]['Arg1']['span_start'])
mod_arg2_e = int(modifiers[mod_id]['Arg1']['span_end'])
arg1_span = [mod_arg1_s, mod_arg1_e, 'argument']
arg2_span = [mod_arg2_s, mod_arg2_e, 'argument']
for mod_arg in [arg1_span, arg2_span]:
if (mod_arg not in unique_arg_spans):
ner.append(mod_arg)
unique_arg_spans.append(mod_arg)
mod_tuple = [mod_arg1_s, mod_arg1_e, mod_arg2_s, mod_arg2_e, relation_type]
if (mod_tuple not in unique_tuples):
unique_tuples.append(mod_tuple)
relations.append(mod_tuple)
sent_dict['ner'] = [ner]
sent_dict['relations'] = [relations]
sent_dict['sentences'] = [[token.text for token in doc]]
converted_dataset.append(sent_dict)
print('{} samples found in {} documents'.format(len(converted_dataset), doc_count))
return converted_dataset
|
def main():
nlp = standalone_TradeoffWordSplitter()
input_files = ['../data/train_set.json', '../data/dev_set.json', '../data/test_set.json']
for input_file in input_files:
output_name = (('../data/' + input_file.rsplit('/', 1)[1].rsplit('.')[0]) + '_SCIIE.json')
dic_list = convert_dataset_to_SCIIE(nlp, input_file)
with open(output_name, 'w', encoding='utf-8') as output_file:
for dic in dic_list:
json.dump(dic, output_file)
output_file.write('\n')
|
def standalone_TradeoffWordSplitter():
nlp = spacy.load('en_core_web_sm')
matcher = Matcher(nlp.vocab)
matcher.add('trade-off', None, [{'ORTH': 'trade'}, {'ORTH': '-'}, {'ORTH': 'off'}])
matcher.add('trade-offs', None, [{'ORTH': 'trade'}, {'ORTH': '-'}, {'ORTH': 'offs'}])
matcher.add('Trade-off', None, [{'ORTH': 'Trade'}, {'ORTH': '-'}, {'ORTH': 'off'}])
matcher.add('Trade-offs', None, [{'ORTH': 'Trade'}, {'ORTH': '-'}, {'ORTH': 'offs'}])
matcher.add('Trade-Off', None, [{'ORTH': 'Trade'}, {'ORTH': '-'}, {'ORTH': 'Off'}])
matcher.add('Trade-Offs', None, [{'ORTH': 'Trade'}, {'ORTH': '-'}, {'ORTH': 'Offs'}])
matcher.add('parentheses', None, [{'ORTH': '('}, {}, {'ORTH': ')'}])
def quote_merger(doc):
matched_spans = []
matches = matcher(doc)
for (match_id, start, end) in matches:
span = doc[start:end]
matched_spans.append(span)
for span in matched_spans:
span.merge()
return doc
nlp.add_pipe(quote_merger, first=True)
return nlp
|
def statistics_per_split(nlp, dataset):
'\n Compute and print annotation statistics for a single data split.\n\n :param nlp: spaCy pipeline created by :func:`standalone_TradeoffWordSplitter`.\n :param dataset: Path to a data split in the dataset json format.\n :return: Counters for span texts, trigger texts, keyphrase lengths, sentence lengths and relation types.\n '
doc_count = 0
token_cnt = Counter()
sentence_len_cnt = Counter()
relation_cnt = Counter()
keyphrase_cnt = Counter()
trigger_cnt = Counter()
span_cnt = Counter()
rel_per_keyphrase = Counter()
triggers_per_sent = []
args_per_trigger = []
spans_per_sent = []
tuples_per_sent = []
with open(dataset) as f:
data = json.load(f)
for source_doc_id in tqdm(data):
doc_count += 1
for sentence_id in data[source_doc_id]:
unique_spans_in_sent = []
unique_tuples_in_sent = []
unique_sent_id = (str(source_doc_id) + str(sentence_id))
sentence: str = data[source_doc_id][sentence_id]['sentence']
doc = nlp(sentence)
sentence_len_cnt[len([t for t in doc])] += 1
for token in doc:
token_cnt[token.text] += 1
tradeoffs = data[source_doc_id][sentence_id]['annotations']['tradeoffs']
modifiers = data[source_doc_id][sentence_id]['annotations']['modifiers']
for tradeoff_id in tradeoffs.keys():
arguments = ((key, value) for (key, value) in tradeoffs[tradeoff_id].items() if key.startswith('Arg'))
relation_type = 'TradeOff'
if ('Negation' in tradeoffs[tradeoff_id]['labels']):
relation_type = 'Not_a_TradeOff'
trigger = tradeoffs[tradeoff_id]['TO_indicator']['text']
trigger_s = tradeoffs[tradeoff_id]['TO_indicator']['span_start']
trigger_e = tradeoffs[tradeoff_id]['TO_indicator']['span_end']
trigger_cnt[trigger] += 1
unique_spans_in_sent.append(trigger)
span_cnt[trigger] += 1
num_args_this_trigger = 0
for (arg_id, arg) in arguments:
num_args_this_trigger += 1
tuple = [trigger_s, trigger_e, arg['span_start'], arg['span_end'], relation_type]
if (tuple not in unique_tuples_in_sent):
relation_cnt[relation_type] += 1
unique_tuples_in_sent.append(tuple)
rel_per_keyphrase[(arg['text'] + unique_sent_id)] += 1
keyphrase_cnt[((int(arg['span_end']) - int(arg['span_start'])) + 1)] += 1
unique_spans_in_sent.append(arg['text'])
span_cnt[arg['text']] += 1
args_per_trigger.append(num_args_this_trigger)
for (mod_id, modifier) in modifiers.items():
relation_type = 'Arg_Modifier'
mod_arg1 = modifiers[mod_id]['Arg0']
mod_arg2 = modifiers[mod_id]['Arg1']
mod_tuple = [mod_arg1['span_start'], mod_arg1['span_end'], mod_arg2['span_start'], mod_arg2['span_end'], relation_type]
if (mod_tuple not in unique_tuples_in_sent):
relation_cnt[relation_type] += 1
unique_tuples_in_sent.append(mod_tuple)
for mod_arg in [mod_arg1, mod_arg2]:
rel_per_keyphrase[(mod_arg['text'] + unique_sent_id)] += 1
if (mod_arg['text'] not in unique_spans_in_sent):
unique_spans_in_sent.append(mod_arg['text'])
span_cnt[mod_arg['text']] += 1
keyphrase_cnt[((int(mod_arg['span_end']) - int(mod_arg['span_start'])) + 1)] += 1
triggers_per_sent.append(len(tradeoffs.keys()))
spans_per_sent.append(len(unique_spans_in_sent))
tuples_per_sent.append(len(unique_tuples_in_sent))
print('Statistics: \n {} samples found in {} documents'.format(sum(sentence_len_cnt.values()), doc_count))
print('# Sentences: {}'.format(sum(sentence_len_cnt.values())))
print('Avg. sent. length: {:.2f}'.format((sum([(k * v) for (k, v) in sentence_len_cnt.items()]) / sum(sentence_len_cnt.values()))))
print('% of sents ≥ 25: {0:.2%}'.format((sum([v for (k, v) in sentence_len_cnt.most_common() if (k > 24)]) / sum(sentence_len_cnt.values()))))
print('Relations:\n - Trade-Off: {}'.format(relation_cnt['TradeOff']))
print(' - Not-a-Trade-Off: {}'.format(relation_cnt['Not_a_TradeOff']))
print(' - Arg-Modifier: {}'.format(relation_cnt['Arg_Modifier']))
print('Triggers: {}'.format(sum(trigger_cnt.values())))
print('Keyphrases: {}'.format(sum(keyphrase_cnt.values())))
print('Keyphrases w/ multiple relations: {}'.format(len([v for v in rel_per_keyphrase.values() if (v > 1)])))
print('Spans: {}'.format(sum(span_cnt.values())))
print('Max args/trigger: {}'.format(max(args_per_trigger)))
print('Max triggers/sent: {}'.format(max(triggers_per_sent)))
print('Max spans/sent: {}'.format(max(spans_per_sent)))
print('Max tuples/sent: {}'.format(max(tuples_per_sent)))
print('Total relations: {}'.format(sum(tuples_per_sent)))
return (span_cnt, trigger_cnt, keyphrase_cnt, sentence_len_cnt, relation_cnt)
|
def main():
nlp = standalone_TradeoffWordSplitter()
total_sent_lengths = Counter()
unique_spans = Counter()
unique_triggers = Counter()
key_phrases = Counter()
total_rel_cnt = Counter()
input_files = ['../data/train_set.json', '../data/dev_set.json', '../data/test_set.json']
for input_file in input_files:
(spans, triggers, k_phrases, sent_cnt, rel_cnt) = statistics_per_split(nlp, input_file)
total_sent_lengths += sent_cnt
unique_spans += spans
unique_triggers += triggers
key_phrases += k_phrases
total_rel_cnt += rel_cnt
print('---------------------- \n Combined over splits \n---------------------- ')
print('Avg. sent. length: {:.2f}'.format((sum([(k * v) for (k, v) in total_sent_lengths.items()]) / sum(total_sent_lengths.values()))))
print('% of sents ≥ 25: {0:.2%}'.format((sum([v for (k, v) in total_sent_lengths.most_common() if (k > 24)]) / sum(total_sent_lengths.values()))))
print('Unique spans: ', len(unique_spans.keys()))
print('Total number of spans: ', sum(unique_spans.values()))
print('Total number of relations: ', sum(total_rel_cnt.values()))
print('Unique triggers: ', len(unique_triggers.keys()))
single_k_phrases = sum([v for (k, v) in key_phrases.items() if (k == 1)])
print('Single word keyphrases: {}({:.2%}) '.format(single_k_phrases, (single_k_phrases / sum(key_phrases.values()))))
print('Avg. tokens per keyphrase: {:.2f}'.format((sum([(k * v) for (k, v) in key_phrases.items()]) / sum(key_phrases.values()))))
|
def add_path(path):
if (path not in sys.path):
sys.path.insert(0, path)
|
def get_imdb(name):
'Get an imdb (image database) by name.'
if (name not in __sets):
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
|
def list_imdbs():
'List all registered imdbs.'
return list(__sets.keys())
|
def munge(src_dir):
files = os.listdir(src_dir)
for fn in files:
(base, ext) = os.path.splitext(fn)
first = base[:14]
second = base[:22]
dst_dir = os.path.join('MCG', 'mat', first, second)
if (not os.path.exists(dst_dir)):
os.makedirs(dst_dir)
src = os.path.join(src_dir, fn)
dst = os.path.join(dst_dir, fn)
print('MV: {} -> {}'.format(src, dst))
os.rename(src, dst)
|
class _fasterRCNN(nn.Module):
' faster RCNN '
def __init__(self, classes, class_agnostic):
super(_fasterRCNN, self).__init__()
self.classes = classes
self.n_classes = len(classes)
self.class_agnostic = class_agnostic
self.RCNN_loss_cls = 0
self.RCNN_loss_bbox = 0
self.RCNN_rpn = _RPN(self.dout_base_model)
self.RCNN_proposal_target = _ProposalTargetLayer(self.n_classes)
self.RCNN_roi_pool = ROIPool((cfg.POOLING_SIZE, cfg.POOLING_SIZE), (1.0 / 16.0))
self.RCNN_roi_align = ROIAlign((cfg.POOLING_SIZE, cfg.POOLING_SIZE), (1.0 / 16.0), 0)
def forward(self, im_data, im_info, gt_boxes, num_boxes):
batch_size = im_data.size(0)
im_info = im_info.data
gt_boxes = gt_boxes.data
num_boxes = num_boxes.data
base_feat = self.RCNN_base(im_data)
(rois, rpn_loss_cls, rpn_loss_bbox) = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)
if self.training:
roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)
(rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws) = roi_data
rois_label = Variable(rois_label.view((- 1)).long())
rois_target = Variable(rois_target.view((- 1), rois_target.size(2)))
rois_inside_ws = Variable(rois_inside_ws.view((- 1), rois_inside_ws.size(2)))
rois_outside_ws = Variable(rois_outside_ws.view((- 1), rois_outside_ws.size(2)))
else:
rois_label = None
rois_target = None
rois_inside_ws = None
rois_outside_ws = None
rpn_loss_cls = 0
rpn_loss_bbox = 0
rois = Variable(rois)
if (cfg.POOLING_MODE == 'align'):
pooled_feat = self.RCNN_roi_align(base_feat, rois.view((- 1), 5))
elif (cfg.POOLING_MODE == 'pool'):
pooled_feat = self.RCNN_roi_pool(base_feat, rois.view((- 1), 5))
pooled_feat = self._head_to_tail(pooled_feat)
bbox_pred = self.RCNN_bbox_pred(pooled_feat)
if (self.training and (not self.class_agnostic)):
bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int((bbox_pred.size(1) / 4)), 4)
bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))
bbox_pred = bbox_pred_select.squeeze(1)
cls_score = self.RCNN_cls_score(pooled_feat)
cls_prob = F.softmax(cls_score, 1)
RCNN_loss_cls = 0
RCNN_loss_bbox = 0
if self.training:
RCNN_loss_cls = F.cross_entropy(cls_score, rois_label)
RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws)
cls_prob = cls_prob.view(batch_size, rois.size(1), (- 1))
bbox_pred = bbox_pred.view(batch_size, rois.size(1), (- 1))
return (rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label)
def _init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
'\n weight initializer: truncated normal and random normal.\n '
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean)
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
normal_init(self.RCNN_rpn.RPN_Conv, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_rpn.RPN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_rpn.RPN_bbox_pred, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_bbox_pred, 0, 0.001, cfg.TRAIN.TRUNCATED)
def create_architecture(self):
self._init_modules()
self._init_weights()
|
def conv3x3(in_planes, out_planes, stride=1):
'3x3 convolution with padding'
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
|
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
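# Shape sanity check for the identity-skip case (torch is assumed to be imported in this
# module, as it is used elsewhere in this file):
block = BasicBlock(64, 64)
print(block(torch.randn(2, 64, 56, 56)).shape)  # torch.Size([2, 64, 56, 56])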
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, (planes * 4), kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d((planes * 4))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
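# The bottleneck expands channels by `expansion` (4), so the skip path needs a matching 1x1
# downsample when the input has fewer channels (a sketch, assuming torch as above):
down = nn.Sequential(nn.Conv2d(64, 256, kernel_size=1, bias=False), nn.BatchNorm2d(256))
block = Bottleneck(64, 64, stride=1, downsample=down)
print(block(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 256, 56, 56])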
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear((512 * block.expansion), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
x = self.fc(x)
return x
|
def resnet18(pretrained=False):
'Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(BasicBlock, [2, 2, 2, 2])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
|
def resnet34(pretrained=False):
'Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(BasicBlock, [3, 4, 6, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
|
def resnet50(pretrained=False):
'Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(Bottleneck, [3, 4, 6, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
|
def resnet101(pretrained=False):
'Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(Bottleneck, [3, 4, 23, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
|
def resnet152(pretrained=False):
'Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
model = ResNet(Bottleneck, [3, 8, 36, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
class resnet(_fasterRCNN):
def __init__(self, classes, num_layers=101, pretrained=False, class_agnostic=False):
self.model_path = 'data/pretrained_model/resnet101_caffe.pth'
self.dout_base_model = 1024
self.pretrained = pretrained
self.class_agnostic = class_agnostic
_fasterRCNN.__init__(self, classes, class_agnostic)
def _init_modules(self):
resnet = resnet101()
if self.pretrained:
print(('Loading pretrained weights from %s' % self.model_path))
state_dict = torch.load(self.model_path)
resnet.load_state_dict({k: v for (k, v) in state_dict.items() if (k in resnet.state_dict())})
self.RCNN_base = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1, resnet.layer2, resnet.layer3)
self.RCNN_top = nn.Sequential(resnet.layer4)
self.RCNN_cls_score = nn.Linear(2048, self.n_classes)
if self.class_agnostic:
self.RCNN_bbox_pred = nn.Linear(2048, 4)
else:
self.RCNN_bbox_pred = nn.Linear(2048, (4 * self.n_classes))
for p in self.RCNN_base[0].parameters():
p.requires_grad = False
for p in self.RCNN_base[1].parameters():
p.requires_grad = False
assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
if (cfg.RESNET.FIXED_BLOCKS >= 3):
for p in self.RCNN_base[6].parameters():
p.requires_grad = False
if (cfg.RESNET.FIXED_BLOCKS >= 2):
for p in self.RCNN_base[5].parameters():
p.requires_grad = False
if (cfg.RESNET.FIXED_BLOCKS >= 1):
for p in self.RCNN_base[4].parameters():
p.requires_grad = False
def set_bn_fix(m):
classname = m.__class__.__name__
if (classname.find('BatchNorm') != (- 1)):
for p in m.parameters():
p.requires_grad = False
self.RCNN_base.apply(set_bn_fix)
self.RCNN_top.apply(set_bn_fix)
def train(self, mode=True):
nn.Module.train(self, mode)
if mode:
self.RCNN_base.eval()
self.RCNN_base[5].train()
self.RCNN_base[6].train()
def set_bn_eval(m):
classname = m.__class__.__name__
if (classname.find('BatchNorm') != (- 1)):
m.eval()
self.RCNN_base.apply(set_bn_eval)
self.RCNN_top.apply(set_bn_eval)
def _head_to_tail(self, pool5):
fc7 = self.RCNN_top(pool5).mean(3).mean(2)
return fc7
|
class vgg16(_fasterRCNN):
def __init__(self, classes, pretrained=False, class_agnostic=False):
self.model_path = 'data/pretrained_model/vgg16_caffe.pth'
self.dout_base_model = 512
self.pretrained = pretrained
self.class_agnostic = class_agnostic
_fasterRCNN.__init__(self, classes, class_agnostic)
def _init_modules(self):
vgg = models.vgg16()
if self.pretrained:
print(('Loading pretrained weights from %s' % self.model_path))
state_dict = torch.load(self.model_path)
vgg.load_state_dict({k: v for (k, v) in state_dict.items() if (k in vgg.state_dict())})
vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:(- 1)])
self.RCNN_base = nn.Sequential(*list(vgg.features._modules.values())[:(- 1)])
for layer in range(10):
for p in self.RCNN_base[layer].parameters():
p.requires_grad = False
self.RCNN_top = vgg.classifier
self.RCNN_cls_score = nn.Linear(4096, self.n_classes)
if self.class_agnostic:
self.RCNN_bbox_pred = nn.Linear(4096, 4)
else:
self.RCNN_bbox_pred = nn.Linear(4096, (4 * self.n_classes))
def _head_to_tail(self, pool5):
pool5_flat = pool5.view(pool5.size(0), (- 1))
fc7 = self.RCNN_top(pool5_flat)
return fc7
|
def _import_symbols(locals):
for symbol in dir(_lib):
fn = getattr(_lib, symbol)
if callable(fn):
locals[symbol] = _wrap_function(fn, _ffi)
else:
locals[symbol] = fn
__all__.append(symbol)
|
def nms_gpu(dets, thresh):
keep = dets.new(dets.size(0), 1).zero_().int()
num_out = dets.new(1).zero_().int()
nms.nms_cuda(keep, dets, num_out, thresh)
keep = keep[:num_out[0]]
return keep
|
def nms(dets, thresh, force_cpu=False):
'Dispatch to either CPU or GPU NMS implementations.'
if (dets.shape[0] == 0):
return []
return (nms_cpu(dets, thresh) if force_cpu else nms_gpu(dets, thresh))
|
def _import_symbols(locals):
for symbol in dir(_lib):
fn = getattr(_lib, symbol)
if callable(fn):
locals[symbol] = _wrap_function(fn, _ffi)
else:
locals[symbol] = fn
__all__.append(symbol)
|
class RoIAlignFunction(Function):
def __init__(self, aligned_height, aligned_width, spatial_scale):
self.aligned_width = int(aligned_width)
self.aligned_height = int(aligned_height)
self.spatial_scale = float(spatial_scale)
self.rois = None
self.feature_size = None
def forward(self, features, rois):
self.rois = rois
self.feature_size = features.size()
(batch_size, num_channels, data_height, data_width) = features.size()
num_rois = rois.size(0)
output = features.new(num_rois, num_channels, self.aligned_height, self.aligned_width).zero_()
if features.is_cuda:
roi_align.roi_align_forward_cuda(self.aligned_height, self.aligned_width, self.spatial_scale, features, rois, output)
else:
roi_align.roi_align_forward(self.aligned_height, self.aligned_width, self.spatial_scale, features, rois, output)
return output
def backward(self, grad_output):
assert ((self.feature_size is not None) and grad_output.is_cuda)
(batch_size, num_channels, data_height, data_width) = self.feature_size
grad_input = self.rois.new(batch_size, num_channels, data_height, data_width).zero_()
roi_align.roi_align_backward_cuda(self.aligned_height, self.aligned_width, self.spatial_scale, grad_output, self.rois, grad_input)
return (grad_input, None)
|
class RoIAlign(Module):
def __init__(self, aligned_height, aligned_width, spatial_scale):
super(RoIAlign, self).__init__()
self.aligned_width = int(aligned_width)
self.aligned_height = int(aligned_height)
self.spatial_scale = float(spatial_scale)
def forward(self, features, rois):
return RoIAlignFunction(self.aligned_height, self.aligned_width, self.spatial_scale)(features, rois)
|
class RoIAlignAvg(Module):
def __init__(self, aligned_height, aligned_width, spatial_scale):
super(RoIAlignAvg, self).__init__()
self.aligned_width = int(aligned_width)
self.aligned_height = int(aligned_height)
self.spatial_scale = float(spatial_scale)
def forward(self, features, rois):
x = RoIAlignFunction((self.aligned_height + 1), (self.aligned_width + 1), self.spatial_scale)(features, rois)
return avg_pool2d(x, kernel_size=2, stride=1)
|
class RoIAlignMax(Module):
def __init__(self, aligned_height, aligned_width, spatial_scale):
super(RoIAlignMax, self).__init__()
self.aligned_width = int(aligned_width)
self.aligned_height = int(aligned_height)
self.spatial_scale = float(spatial_scale)
def forward(self, features, rois):
x = RoIAlignFunction((self.aligned_height + 1), (self.aligned_width + 1), self.spatial_scale)(features, rois)
return max_pool2d(x, kernel_size=2, stride=1)
|
def _import_symbols(locals):
for symbol in dir(_lib):
fn = getattr(_lib, symbol)
locals[symbol] = _wrap_function(fn, _ffi)
__all__.append(symbol)
|
def _import_symbols(locals):
for symbol in dir(_lib):
fn = getattr(_lib, symbol)
if callable(fn):
locals[symbol] = _wrap_function(fn, _ffi)
else:
locals[symbol] = fn
__all__.append(symbol)
|
class RoICropFunction(Function):
def forward(self, input1, input2):
self.input1 = input1
self.input2 = input2
self.device_c = ffi.new('int *')
output = torch.zeros(input2.size()[0], input1.size()[1], input2.size()[1], input2.size()[2])
if input1.is_cuda:
self.device = torch.cuda.current_device()
else:
self.device = (- 1)
self.device_c[0] = self.device
if (not input1.is_cuda):
roi_crop.BilinearSamplerBHWD_updateOutput(input1, input2, output)
else:
output = output.cuda(self.device)
roi_crop.BilinearSamplerBHWD_updateOutput_cuda(input1, input2, output)
return output
def backward(self, grad_output):
grad_input1 = torch.zeros(self.input1.size())
grad_input2 = torch.zeros(self.input2.size())
if (not grad_output.is_cuda):
roi_crop.BilinearSamplerBHWD_updateGradInput(self.input1, self.input2, grad_input1, grad_input2, grad_output)
else:
grad_input1 = grad_input1.cuda(self.device)
grad_input2 = grad_input2.cuda(self.device)
roi_crop.BilinearSamplerBHWD_updateGradInput_cuda(self.input1, self.input2, grad_input1, grad_input2, grad_output)
return (grad_input1, grad_input2)
|
class RoICropFunction(Function):
def forward(self, input1, input2):
self.input1 = input1.clone()
self.input2 = input2.clone()
output = input2.new(input2.size()[0], input1.size()[1], input2.size()[1], input2.size()[2]).zero_()
assert (output.get_device() == input1.get_device()), 'output and input1 must be on the same device'
assert (output.get_device() == input2.get_device()), 'output and input2 must be on the same device'
roi_crop.BilinearSamplerBHWD_updateOutput_cuda(input1, input2, output)
return output
def backward(self, grad_output):
grad_input1 = self.input1.new(self.input1.size()).zero_()
grad_input2 = self.input2.new(self.input2.size()).zero_()
roi_crop.BilinearSamplerBHWD_updateGradInput_cuda(self.input1, self.input2, grad_input1, grad_input2, grad_output)
return (grad_input1, grad_input2)
|
class _RoICrop(Module):
def __init__(self, layout='BHWD'):
super(_RoICrop, self).__init__()
def forward(self, input1, input2):
return RoICropFunction()(input1, input2)
|
class _ROIAlign(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
ctx.save_for_backward(roi)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
ctx.input_shape = input.size()
output = _C.roi_align_forward(input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
(rois,) = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sampling_ratio
(bs, ch, h, w) = ctx.input_shape
grad_input = _C.roi_align_backward(grad_output, rois, spatial_scale, output_size[0], output_size[1], bs, ch, h, w, sampling_ratio)
return (grad_input, None, None, None, None)
|
class ROIAlign(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio):
super(ROIAlign, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def forward(self, input, rois):
return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio)
def __repr__(self):
tmpstr = (self.__class__.__name__ + '(')
tmpstr += ('output_size=' + str(self.output_size))
tmpstr += (', spatial_scale=' + str(self.spatial_scale))
tmpstr += (', sampling_ratio=' + str(self.sampling_ratio))
tmpstr += ')'
return tmpstr
|
class _ROIPool(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale):
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.input_shape = input.size()
(output, argmax) = _C.roi_pool_forward(input, roi, spatial_scale, output_size[0], output_size[1])
ctx.save_for_backward(input, roi, argmax)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
(input, rois, argmax) = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
(bs, ch, h, w) = ctx.input_shape
grad_input = _C.roi_pool_backward(grad_output, input, rois, argmax, spatial_scale, output_size[0], output_size[1], bs, ch, h, w)
return (grad_input, None, None, None)
|
class ROIPool(nn.Module):
def __init__(self, output_size, spatial_scale):
super(ROIPool, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
def forward(self, input, rois):
return roi_pool(input, rois, self.output_size, self.spatial_scale)
def __repr__(self):
tmpstr = (self.__class__.__name__ + '(')
tmpstr += ('output_size=' + str(self.output_size))
tmpstr += (', spatial_scale=' + str(self.spatial_scale))
tmpstr += ')'
return tmpstr
|
def _import_symbols(locals):
for symbol in dir(_lib):
fn = getattr(_lib, symbol)
if callable(fn):
locals[symbol] = _wrap_function(fn, _ffi)
else:
locals[symbol] = fn
__all__.append(symbol)
|
class RoIPoolFunction(Function):
def __init__(ctx, pooled_height, pooled_width, spatial_scale):
ctx.pooled_width = pooled_width
ctx.pooled_height = pooled_height
ctx.spatial_scale = spatial_scale
ctx.feature_size = None
def forward(ctx, features, rois):
ctx.feature_size = features.size()
(batch_size, num_channels, data_height, data_width) = ctx.feature_size
num_rois = rois.size(0)
output = features.new(num_rois, num_channels, ctx.pooled_height, ctx.pooled_width).zero_()
ctx.argmax = features.new(num_rois, num_channels, ctx.pooled_height, ctx.pooled_width).zero_().int()
ctx.rois = rois
if (not features.is_cuda):
_features = features.permute(0, 2, 3, 1)
roi_pooling.roi_pooling_forward(ctx.pooled_height, ctx.pooled_width, ctx.spatial_scale, _features, rois, output)
else:
roi_pooling.roi_pooling_forward_cuda(ctx.pooled_height, ctx.pooled_width, ctx.spatial_scale, features, rois, output, ctx.argmax)
return output
def backward(ctx, grad_output):
assert ((ctx.feature_size is not None) and grad_output.is_cuda)
(batch_size, num_channels, data_height, data_width) = ctx.feature_size
grad_input = grad_output.new(batch_size, num_channels, data_height, data_width).zero_()
roi_pooling.roi_pooling_backward_cuda(ctx.pooled_height, ctx.pooled_width, ctx.spatial_scale, grad_output, ctx.rois, grad_input, ctx.argmax)
return (grad_input, None)
|
class _RoIPooling(Module):
def __init__(self, pooled_height, pooled_width, spatial_scale):
super(_RoIPooling, self).__init__()
self.pooled_width = int(pooled_width)
self.pooled_height = int(pooled_height)
self.spatial_scale = float(spatial_scale)
def forward(self, features, rois):
return RoIPoolFunction(self.pooled_height, self.pooled_width, self.spatial_scale)(features, rois)
|
class _RPN(nn.Module):
' region proposal network '
def __init__(self, din):
super(_RPN, self).__init__()
self.din = din
self.anchor_scales = cfg.ANCHOR_SCALES
self.anchor_ratios = cfg.ANCHOR_RATIOS
self.feat_stride = cfg.FEAT_STRIDE[0]
self.RPN_Conv = nn.Conv2d(self.din, 512, 3, 1, 1, bias=True)
self.nc_score_out = ((len(self.anchor_scales) * len(self.anchor_ratios)) * 2)
self.RPN_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1, 0)
self.nc_bbox_out = ((len(self.anchor_scales) * len(self.anchor_ratios)) * 4)
self.RPN_bbox_pred = nn.Conv2d(512, self.nc_bbox_out, 1, 1, 0)
self.RPN_proposal = _ProposalLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)
self.RPN_anchor_target = _AnchorTargetLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)
self.rpn_loss_cls = 0
self.rpn_loss_box = 0
@staticmethod
def reshape(x, d):
input_shape = x.size()
x = x.view(input_shape[0], int(d), int((float((input_shape[1] * input_shape[2])) / float(d))), input_shape[3])
return x
def forward(self, base_feat, im_info, gt_boxes, num_boxes):
batch_size = base_feat.size(0)
rpn_conv1 = F.relu(self.RPN_Conv(base_feat), inplace=True)
rpn_cls_score = self.RPN_cls_score(rpn_conv1)
rpn_cls_score_reshape = self.reshape(rpn_cls_score, 2)
rpn_cls_prob_reshape = F.softmax(rpn_cls_score_reshape, 1)
rpn_cls_prob = self.reshape(rpn_cls_prob_reshape, self.nc_score_out)
rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv1)
cfg_key = ('TRAIN' if self.training else 'TEST')
rois = self.RPN_proposal((rpn_cls_prob.data, rpn_bbox_pred.data, im_info, cfg_key))
self.rpn_loss_cls = 0
self.rpn_loss_box = 0
if self.training:
assert (gt_boxes is not None)
rpn_data = self.RPN_anchor_target((rpn_cls_score.data, gt_boxes, im_info, num_boxes))
rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(batch_size, (- 1), 2)
rpn_label = rpn_data[0].view(batch_size, (- 1))
rpn_keep = Variable(rpn_label.view((- 1)).ne((- 1)).nonzero().view((- 1)))
rpn_cls_score = torch.index_select(rpn_cls_score.view((- 1), 2), 0, rpn_keep)
rpn_label = torch.index_select(rpn_label.view((- 1)), 0, rpn_keep.data)
rpn_label = Variable(rpn_label.long())
self.rpn_loss_cls = F.cross_entropy(rpn_cls_score, rpn_label)
fg_cnt = torch.sum(rpn_label.data.ne(0))
(rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights) = rpn_data[1:]
rpn_bbox_inside_weights = Variable(rpn_bbox_inside_weights)
rpn_bbox_outside_weights = Variable(rpn_bbox_outside_weights)
rpn_bbox_targets = Variable(rpn_bbox_targets)
self.rpn_loss_box = _smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights, sigma=3, dim=[1, 2, 3])
return (rois, self.rpn_loss_cls, self.rpn_loss_box)
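# Illustrative shape check for the reshape() helper above: it folds the per-anchor
# scores into the height dimension so the two-way objectness softmax can run over
# dim=1, and applying it again with the original depth restores the layout. The
# anchor count A = 9 is only an assumption for the example.
def _demo_rpn_reshape():
    import torch
    B, A, H, W = 1, 9, 38, 50
    scores = torch.randn(B, 2 * A, H, W)      # raw RPN_cls_score output
    folded = _RPN.reshape(scores, 2)          # -> (B, 2, A * H, W)
    restored = _RPN.reshape(folded, 2 * A)    # -> (B, 2 * A, H, W)
    assert folded.shape == (B, 2, A * H, W) and restored.shape == scores.shape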
|
def get_output_dir(imdb, weights_filename):
'Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not None).\n '
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if (weights_filename is None):
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if (not os.path.exists(outdir)):
os.makedirs(outdir)
return outdir
|
def get_output_tb_dir(imdb, weights_filename):
'Return the directory where tensorflow summaries are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not None).\n '
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
if (weights_filename is None):
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if (not os.path.exists(outdir)):
os.makedirs(outdir)
return outdir
|
def _merge_a_into_b(a, b):
'Merge config dictionary a into config dictionary b, clobbering the\n options in b whenever they are also specified in a.\n '
if (type(a) is not edict):
return
for (k, v) in a.items():
if (k not in b):
raise KeyError('{} is not a valid config key'.format(k))
old_type = type(b[k])
if (old_type is not type(v)):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k]), type(v), k))
if (type(v) is edict):
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
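# Hypothetical example of the merge semantics: only keys already present in the
# defaults may be overridden, and values are type-checked. The config keys below
# are made up for illustration, not taken from the project's defaults.
def _demo_merge_config():
    from easydict import EasyDict as edict
    defaults = edict({'TRAIN': edict({'LEARNING_RATE': 0.001, 'SCALES': (600,)})})
    override = edict({'TRAIN': edict({'LEARNING_RATE': 0.01})})
    _merge_a_into_b(override, defaults)
    assert defaults.TRAIN.LEARNING_RATE == 0.01  # clobbered; SCALES keeps its default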
|
def cfg_from_file(filename):
'Load a config file and merge it into the default options.'
import yaml
with open(filename, 'r') as f:
        yaml_cfg = edict(yaml.safe_load(f))  # safe_load avoids PyYAML's unsafe/deprecated plain load()
_merge_a_into_b(yaml_cfg, __C)
|
def cfg_from_list(cfg_list):
'Set config keys via list (e.g., from command line).'
from ast import literal_eval
assert ((len(cfg_list) % 2) == 0)
for (k, v) in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:(- 1)]:
assert (subkey in d)
d = d[subkey]
subkey = key_list[(- 1)]
assert (subkey in d)
        try:
            value = literal_eval(v)
        except (ValueError, SyntaxError):
            value = v  # keep the raw string when it is not a valid Python literal
assert (type(value) == type(d[subkey])), 'type {} does not match original type {}'.format(type(value), type(d[subkey]))
d[subkey] = value
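# Hypothetical usage: cfg_from_list takes a flat, even-length list of alternating
# dotted keys and literal values; each key must already exist in __C with a
# matching type. The key names below are assumptions about the default config.
def _demo_cfg_from_list():
    cfg_from_list(['TRAIN.LEARNING_RATE', '0.01', 'TRAIN.MAX_SIZE', '1200'])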
|
class Logger(object):
def __init__(self, log_dir):
'Create a summary writer logging to log_dir.'
self.writer = tf.summary.FileWriter(log_dir)
def scalar_summary(self, tag, value, step):
'Log a scalar variable.'
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
def image_summary(self, tag, images, step):
'Log a list of images.'
img_summaries = []
for (i, img) in enumerate(images):
            s = BytesIO()  # PNG-encoded image bytes need a binary buffer (a StringIO would fail under Python 3)
            scipy.misc.toimage(img).save(s, format='png')  # requires SciPy < 1.2, where scipy.misc.toimage still exists
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), height=img.shape[0], width=img.shape[1])
img_summaries.append(tf.Summary.Value(tag=('%s/%d' % (tag, i)), image=img_sum))
summary = tf.Summary(value=img_summaries)
self.writer.add_summary(summary, step)
def histo_summary(self, tag, values, step, bins=1000):
'Log a histogram of the tensor of values.'
(counts, bin_edges) = np.histogram(values, bins=bins)
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum((values ** 2)))
bin_edges = bin_edges[1:]
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush()
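# Minimal usage sketch for the Logger above. It targets the TensorFlow 1.x summary
# API (tf.summary.FileWriter / tf.Summary); under TF2 the same calls would need
# tf.compat.v1. The log directory is arbitrary.
def _demo_logger(log_dir='./logs'):
    logger = Logger(log_dir)
    for step, loss in enumerate([0.9, 0.7, 0.55]):
        logger.scalar_summary('train/loss', loss, step)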
|
def prepare_roidb(imdb):
"Enrich the imdb's roidb by adding some derived quantities that\n are useful for training. This function precomputes the maximum\n overlap, taken over ground-truth boxes, between each ROI and\n each ground-truth box. The class with maximum overlap is also\n recorded.\n "
roidb = imdb.roidb
if (not imdb.name.startswith('coco')):
sizes = [PIL.Image.open(imdb.image_path_at(i)).size for i in range(imdb.num_images)]
for i in range(len(imdb.image_index)):
roidb[i]['img_id'] = imdb.image_id_at(i)
roidb[i]['image'] = imdb.image_path_at(i)
if (not imdb.name.startswith('coco')):
roidb[i]['width'] = sizes[i][0]
roidb[i]['height'] = sizes[i][1]
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
max_overlaps = gt_overlaps.max(axis=1)
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_classes'] = max_classes
roidb[i]['max_overlaps'] = max_overlaps
zero_inds = np.where((max_overlaps == 0))[0]
assert all((max_classes[zero_inds] == 0))
nonzero_inds = np.where((max_overlaps > 0))[0]
assert all((max_classes[nonzero_inds] != 0))
|
def rank_roidb_ratio(roidb):
ratio_large = 2
ratio_small = 0.5
ratio_list = []
for i in range(len(roidb)):
width = roidb[i]['width']
height = roidb[i]['height']
ratio = (width / float(height))
if (ratio > ratio_large):
roidb[i]['need_crop'] = 1
ratio = ratio_large
elif (ratio < ratio_small):
roidb[i]['need_crop'] = 1
ratio = ratio_small
else:
roidb[i]['need_crop'] = 0
ratio_list.append(ratio)
ratio_list = np.array(ratio_list)
ratio_index = np.argsort(ratio_list)
return (ratio_list[ratio_index], ratio_index)
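# Tiny synthetic example of rank_roidb_ratio: aspect ratios are clamped to
# [0.5, 2], clamped entries are flagged with need_crop=1, and the function returns
# the sorted ratios plus the sort order so similarly shaped images can be batched
# together. The widths/heights below are made up.
def _demo_rank_roidb_ratio():
    fake_roidb = [{'width': 1200, 'height': 400},   # ratio 3.0  -> clamped to 2.0
                  {'width': 600, 'height': 800},    # ratio 0.75 -> kept
                  {'width': 300, 'height': 900}]    # ratio 0.33 -> clamped to 0.5
    ratios, order = rank_roidb_ratio(fake_roidb)
    assert list(ratios) == [0.5, 0.75, 2.0] and list(order) == [2, 1, 0]
    assert fake_roidb[0]['need_crop'] == 1 and fake_roidb[1]['need_crop'] == 0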
|
def filter_roidb(roidb):
print(('before filtering, there are %d images...' % len(roidb)))
i = 0
while (i < len(roidb)):
if (len(roidb[i]['boxes']) == 0):
del roidb[i]
i -= 1
i += 1
print(('after filtering, there are %d images...' % len(roidb)))
return roidb
|
def combined_roidb(imdb_names, training=True):
'\n Combine multiple roidbs\n '
def get_training_roidb(imdb):
'Returns a roidb (Region of Interest database) for use in training.'
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
prepare_roidb(imdb)
print('done')
return imdb.roidb
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print('Loaded dataset `{:s}` for training'.format(imdb.name))
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
roidb = get_training_roidb(imdb)
return roidb
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if (len(roidbs) > 1):
for r in roidbs[1:]:
roidb.extend(r)
tmp = get_imdb(imdb_names.split('+')[1])
imdb = datasets.imdb.imdb(imdb_names, tmp.classes)
else:
imdb = get_imdb(imdb_names)
if training:
roidb = filter_roidb(roidb)
(ratio_list, ratio_index) = rank_roidb_ratio(roidb)
return (imdb, roidb, ratio_list, ratio_index)
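# Hypothetical call: combined_roidb accepts one or more registered imdb names
# joined by '+'. 'voc_2007_trainval' assumes the PASCAL VOC 2007 imdb is
# registered in the dataset factory and its data is available on disk.
def _demo_combined_roidb():
    imdb, roidb, ratio_list, ratio_index = combined_roidb('voc_2007_trainval')
    return len(roidb), imdb.num_classes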
|
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, 'model', 'csrc')
main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))
source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))
source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu'))
sources = (main_file + source_cpu)
extension = CppExtension
extra_compile_args = {'cxx': []}
define_macros = []
if (torch.cuda.is_available() and (CUDA_HOME is not None)):
extension = CUDAExtension
sources += source_cuda
define_macros += [('WITH_CUDA', None)]
extra_compile_args['nvcc'] = ['-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__']
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [extension('model._C', sources, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args)]
return ext_modules
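# Typical way to consume get_extensions() in a setup.py, shown as a hypothetical
# sketch (kept commented so it is not executed as an import side effect):
#
#   from setuptools import setup
#   from torch.utils.cpp_extension import BuildExtension
#
#   setup(name='model',
#         ext_modules=get_extensions(),
#         cmdclass={'build_ext': BuildExtension})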
|
def parse_args():
'\n Parse input arguments\n '
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset', help='training dataset', default='pascal_voc', type=str)
parser.add_argument('--net', dest='net', help='vgg16, res101', default='vgg16', type=str)
parser.add_argument('--start_epoch', dest='start_epoch', help='starting epoch', default=1, type=int)
parser.add_argument('--epochs', dest='max_epochs', help='number of epochs to train', default=20, type=int)
parser.add_argument('--disp_interval', dest='disp_interval', help='number of iterations to display', default=100, type=int)
    parser.add_argument('--checkpoint_interval', dest='checkpoint_interval', help='number of iterations between checkpoints', default=10000, type=int)
parser.add_argument('--save_dir', dest='save_dir', help='directory to save models', default='models', type=str)
    parser.add_argument('--nw', dest='num_workers', help='number of workers to load data', default=0, type=int)
    parser.add_argument('--cuda', dest='cuda', help='whether to use CUDA', action='store_true')
    parser.add_argument('--ls', dest='large_scale', help='whether to use large image scale', action='store_true')
    parser.add_argument('--mGPUs', dest='mGPUs', help='whether to use multiple GPUs', action='store_true')
parser.add_argument('--bs', dest='batch_size', help='batch_size', default=1, type=int)
    parser.add_argument('--cag', dest='class_agnostic', help='whether to perform class-agnostic bbox regression', action='store_true')
parser.add_argument('--o', dest='optimizer', help='training optimizer', default='sgd', type=str)
parser.add_argument('--lr', dest='lr', help='starting learning rate', default=0.001, type=float)
parser.add_argument('--lr_decay_step', dest='lr_decay_step', help='step to do learning rate decay, unit is epoch', default=5, type=int)
parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma', help='learning rate decay ratio', default=0.1, type=float)
parser.add_argument('--s', dest='session', help='training session', default=1, type=int)
    parser.add_argument('--r', dest='resume', help='resume checkpoint or not', action='store_true')  # store_true replaces type=bool, which would treat any non-empty string (even "False") as True
parser.add_argument('--checksession', dest='checksession', help='checksession to load model', default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch', help='checkepoch to load model', default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint', help='checkpoint to load model', default=0, type=int)
    parser.add_argument('--use_tfb', dest='use_tfboard', help='whether to use tensorboard', action='store_true')
args = parser.parse_args()
return args
|
class sampler(Sampler):
def __init__(self, train_size, batch_size):
self.num_data = train_size
self.num_per_batch = int((train_size / batch_size))
self.batch_size = batch_size
self.range = torch.arange(0, batch_size).view(1, batch_size).long()
self.leftover_flag = False
if (train_size % batch_size):
self.leftover = torch.arange((self.num_per_batch * batch_size), train_size).long()
self.leftover_flag = True
def __iter__(self):
rand_num = (torch.randperm(self.num_per_batch).view((- 1), 1) * self.batch_size)
self.rand_num = (rand_num.expand(self.num_per_batch, self.batch_size) + self.range)
self.rand_num_view = self.rand_num.view((- 1))
if self.leftover_flag:
self.rand_num_view = torch.cat((self.rand_num_view, self.leftover), 0)
return iter(self.rand_num_view)
def __len__(self):
return self.num_data
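# Self-contained check of the sampler's behaviour: it yields all dataset indices
# as contiguous batch-sized blocks whose block order is reshuffled every epoch,
# with any leftover tail appended in order. The sizes here are arbitrary.
def _demo_sampler():
    s = sampler(train_size=10, batch_size=4)
    idx = [int(i) for i in iter(s)]
    assert sorted(idx) == list(range(10))  # every index appears exactly once
    assert idx[-2:] == [8, 9]              # the leftover tail keeps its order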
|
def compute_auc(s_error, p_error, a_error):
assert (len(s_error) == 71)
assert (len(p_error) == 48)
assert (len(a_error) == 14)
s_error = np.array(s_error)
p_error = np.array(p_error)
a_error = np.array(a_error)
limit = 25
gs_error = np.zeros((limit + 1))
gp_error = np.zeros((limit + 1))
ga_error = np.zeros((limit + 1))
accum_s = 0
accum_p = 0
accum_a = 0
for i in range(1, (limit + 1)):
gs_error[i] = ((np.sum((s_error < i)) * 100) / len(s_error))
gp_error[i] = ((np.sum((p_error < i)) * 100) / len(p_error))
ga_error[i] = ((np.sum((a_error < i)) * 100) / len(a_error))
accum_s = (accum_s + gs_error[i])
accum_p = (accum_p + gp_error[i])
accum_a = (accum_a + ga_error[i])
auc_s = (accum_s / (limit * 100))
auc_p = (accum_p / (limit * 100))
auc_a = (accum_a / (limit * 100))
mAUC = (((auc_s + auc_p) + auc_a) / 3.0)
return {'s': auc_s, 'p': auc_p, 'a': auc_a, 'mAUC': mAUC}
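# Synthetic example of compute_auc: it integrates the success rate over pixel
# error thresholds 1..25 for the three fixed-size splits (71 / 48 / 14 pairs, as
# asserted above) and averages the three AUCs. The errors below are random
# placeholders, not real evaluation results.
def _demo_compute_auc():
    import numpy as np
    rng = np.random.default_rng(0)
    return compute_auc(list(rng.uniform(0, 30, 71)),
                       list(rng.uniform(0, 30, 48)),
                       list(rng.uniform(0, 30, 14)))  # {'s', 'p', 'a', 'mAUC'}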
|
def affine_images(images, used_for='detector'):
    '\n Perform a random affine transformation on a batch of images\n :param images: input batch of shape (B, C, H, W)\n :param used_for: "detector" applies stronger rotation/translation/scale ranges than "descriptor", which also adds photometric jitter\n :return: affine-transformed images, the forward sampling grid, and the inverse sampling grid (for warping results back)\n '
(h, w) = images.shape[2:]
theta = None
thetaI = None
for i in range(len(images)):
if (used_for == 'detector'):
affine_params = T.RandomAffine(20).get_params(degrees=[(- 15), 15], translate=[0.2, 0.2], scale_ranges=[0.9, 1.35], shears=None, img_size=[h, w])
else:
affine_params = T.RandomAffine(20).get_params(degrees=[(- 3), 3], translate=[0.1, 0.1], scale_ranges=[0.9, 1.1], shears=None, img_size=[h, w])
angle = (((- affine_params[0]) * math.pi) / 180)
theta_ = torch.tensor([[((1 / affine_params[2]) * math.cos(angle)), math.sin((- angle)), ((- affine_params[1][0]) / images.shape[2])], [math.sin(angle), ((1 / affine_params[2]) * math.cos(angle)), ((- affine_params[1][1]) / images.shape[3])], [0, 0, 1]], dtype=torch.float).to(images)
thetaI_ = theta_.inverse()
theta_ = theta_[:2]
thetaI_ = thetaI_[:2]
theta_ = theta_.unsqueeze(0)
thetaI_ = thetaI_.unsqueeze(0)
if (theta is None):
theta = theta_
else:
theta = torch.cat((theta, theta_))
if (thetaI is None):
thetaI = thetaI_
else:
thetaI = torch.cat((thetaI, thetaI_))
grid = F.affine_grid(theta, images.size(), align_corners=True)
grid = grid.to(images)
grid_inverse = F.affine_grid(thetaI, images.size(), align_corners=True)
grid_inverse = grid_inverse.to(images)
output = F.grid_sample(images, grid, align_corners=True)
if (used_for == 'descriptor'):
if (random.random() >= 0.4):
output = output.repeat(1, 3, 1, 1)
output = T.ColorJitter(brightness=0.4, contrast=0.3, saturation=0.3, hue=0.2)(output)
output = T.Grayscale()(output)
return (output.detach().clone(), grid, grid_inverse)
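# Hypothetical round-trip with affine_images: it returns the warped batch plus the
# forward and inverse sampling grids, and re-sampling the output with grid_inverse
# approximately undoes the warp (up to border padding), which is how predictions
# can be mapped back to the original frame.
def _demo_affine_images():
    import torch
    import torch.nn.functional as F
    images = torch.rand(2, 1, 768, 768)  # (B, C, H, W), single-channel as in training
    warped, grid, grid_inverse = affine_images(images, used_for='detector')
    recovered = F.grid_sample(warped, grid_inverse, align_corners=True)
    return recovered.shape  # (2, 1, 768, 768), same as the input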
|
def get_gaussian_kernel(kernlen=21, nsig=5):
'Get kernels used for generating Gaussian heatmaps'
interval = (((2 * nsig) + 1.0) / kernlen)
x = np.linspace(((- nsig) - (interval / 2.0)), (nsig + (interval / 2.0)), (kernlen + 1))
kern1d = np.diff(st.norm.cdf(x))
kernel_raw = np.sqrt(np.outer(kern1d, kern1d))
kernel = (kernel_raw / kernel_raw.sum())
kernel = torch.FloatTensor(kernel).unsqueeze(0).unsqueeze(0)
weight = torch.nn.Parameter(data=kernel, requires_grad=False)
weight = ((weight - weight.min()) / (weight.max() - weight.min()))
return weight
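# A common use of such a kernel (an assumption about intent, not taken from the
# original project) is to convolve a sparse binary keypoint map into a smooth
# Gaussian heatmap:
def _demo_gaussian_heatmap():
    import torch
    import torch.nn.functional as F
    kernel = get_gaussian_kernel(kernlen=21, nsig=5)   # (1, 1, 21, 21), peak scaled to 1
    keypoints = torch.zeros(1, 1, 64, 64)
    keypoints[0, 0, 32, 32] = 1.0                      # a single keypoint
    heatmap = F.conv2d(keypoints, kernel, padding=10)  # same-size Gaussian blob
    return heatmap.shape                               # (1, 1, 64, 64)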
|
def value_map_load(save_dir, names, input_with_label, shape=(768, 768), value_maps_running=None):
value_maps = []
for (s, name) in enumerate(names):
path = os.path.join(save_dir, (name.split('.')[0] + '.png'))
if (input_with_label[s] and (value_maps_running is not None) and (name in value_maps_running)):
value_map = value_maps_running[name]
elif (input_with_label[s] and (value_maps_running is None) and os.path.exists(path)):
value_map = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
else:
value_map = np.zeros([shape[0], shape[1]]).astype(np.uint8)
value_map = torch.from_numpy(value_map).unsqueeze(0).unsqueeze(0)
value_maps.append(value_map)
return torch.cat(value_maps)
|
def value_map_save(save_dir, names, input_with_label, value_maps, value_maps_running=None):
for (s, name) in enumerate(names):
if input_with_label[s]:
vp = value_maps[s].squeeze().numpy()
if (value_maps_running is not None):
value_maps_running[name] = vp
else:
path = os.path.join(save_dir, (name.split('.')[0] + '.png'))
cv2.imwrite(path, vp)
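# Hypothetical in-memory round trip through value_map_save / value_map_load: when
# a value_maps_running dict is supplied, no PNG files are touched, so save_dir can
# be anything. Shapes follow the (B, 1, H, W) convention used in training.
def _demo_value_map_roundtrip():
    import torch
    running, names = {}, ['img_001.png']
    with_label = torch.tensor([True])
    maps = torch.zeros(1, 1, 768, 768, dtype=torch.uint8)
    value_map_save('', names, with_label, maps, value_maps_running=running)
    loaded = value_map_load('', names, with_label, shape=(768, 768),
                            value_maps_running=running)
    return loaded.shape  # (1, 1, 768, 768)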
|
def train_model(model, optimizer, dataloaders, device, num_epochs, train_config):
model_save_path = train_config['model_save_path']
model_save_epoch = train_config['model_save_epoch']
pke_start_epoch = train_config['pke_start_epoch']
pke_show_epoch = train_config['pke_show_epoch']
pke_show_list = train_config['pke_show_list']
is_value_map_save = train_config['is_value_map_save']
value_map_save_dir = train_config['value_map_save_dir']
if is_value_map_save:
if os.path.exists(value_map_save_dir):
shutil.rmtree(value_map_save_dir)
os.makedirs(value_map_save_dir)
value_maps_running = None
if (not is_value_map_save):
value_maps_running = {}
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, (num_epochs - 1)))
phases = list(dataloaders.keys())
model.PKE_learn = True
if (epoch < pke_start_epoch):
model.PKE_learn = False
for phase in phases:
pke_show = train_config['pke_show']
image_shows = []
init_kp_shows = []
label_shows = []
enhanced_kp_shows = []
show_names = []
if ('val' in phase):
if ((epoch % model_save_epoch) == 0):
print(f'save model for epoch {epoch}')
state = {'net': model.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch}
torch.save(state, model_save_path)
continue
print((((((('-' * 10) + 'phase:') + phase) + '\t PKE_learn:') + str(model.PKE_learn)) + ('-' * 10)))
if ('train' in phase):
for param_group in optimizer.param_groups:
print('LR', param_group['lr'])
model.train()
else:
model.eval()
epoch_samples = 0
print_descriptor_loss = 0
print_detector_loss = 0
num_learned_pts = 0
num_input_with_label = 0
num_input_descriptor = 0
for (images, input_with_label, keypoint_positions, label_names) in tqdm(dataloaders[phase]):
batch_size = images.shape[0]
learn_index = torch.where(input_with_label)
images = images.to(device)
value_maps = value_map_load(value_map_save_dir, label_names, input_with_label, images.shape[(- 2):], value_maps_running)
value_maps = value_maps.to(device)
keypoint_positions = keypoint_positions.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(('train' in phase)):
(loss, number_pts_one, print_loss_detector_one, print_loss_descriptor_one, enhanced_label_pts, enhanced_label, detector_pred, loss_detector_num, loss_descriptor_num) = model(images, keypoint_positions, value_maps, learn_index)
if (enhanced_label_pts is None):
enhanced_label_pts = keypoint_positions
if pke_show:
if ((len(pke_show_list) == 0) and (len(learn_index[0]) != 0)):
show_names = [(label_names[learn_index[0][0]], learn_index[0][0])]
pke_show = False
for (s, label_path) in enumerate(label_names):
file_name = os.path.split(label_path)[(- 1)].split('.')[0]
if (file_name in pke_show_list):
show_names.append((label_path, s))
for (show_name, idx) in show_names:
image_shows.append(images[idx][0].cpu().data)
init_kp_shows.append(keypoint_positions[idx][0].cpu().data)
label_shows.append(enhanced_label[idx][0].cpu().data)
pred_show = detector_pred.detach().cpu()[idx][0]
enhanced_kp_shows.append(enhanced_label_pts[idx][0].cpu().data)
print_detector_loss += (print_loss_detector_one * len(learn_index[0]))
print_descriptor_loss += (print_loss_descriptor_one * batch_size)
num_input_with_label += loss_detector_num
num_learned_pts += number_pts_one
if ('train' in phase):
loss.backward()
optimizer.step()
if (len(learn_index[0]) != 0):
value_maps = value_maps.cpu()
value_map_save(value_map_save_dir, label_names, input_with_label, value_maps, value_maps_running)
num_input_descriptor += loss_descriptor_num
epoch_samples += batch_size
print_detector_loss = (print_detector_loss / num_input_with_label)
print_descriptor_loss = (print_descriptor_loss / epoch_samples)
print(phase, 'overall loss: {}'.format((print_detector_loss + print_descriptor_loss)), 'detector_loss: {} of {} nums, #avg learned keypoints:{} '.format(print_detector_loss, num_input_with_label, (num_learned_pts / num_input_with_label)), 'descriptor_loss: {} of {} nums'.format(print_descriptor_loss, num_input_descriptor))
for (s, (name, _)) in enumerate(show_names):
if (not ((epoch % pke_show_epoch) == 0)):
break
input_show = image_shows[s]
label_show = label_shows[s]
init_kp_show = init_kp_shows[s]
enhanced_kp_show = enhanced_kp_shows[s]
name = os.path.split(name)[(- 1)].split('.')[0]
plt.figure(dpi=100)
plt.imshow(input_show, 'gray')
plt.title('epoch:{}, phase:{}, name:{}'.format(epoch, phase, name))
plt.axis('off')
try:
(y, x) = torch.where((enhanced_kp_show.cpu() == 1))
plt.scatter(x, y, s=2, c='springgreen')
except Exception:
pass
try:
(y, x) = torch.where((init_kp_show.cpu() == 1))
plt.scatter(x, y, s=2, c='b')
except Exception:
pass
plt.show()
plt.figure(dpi=200)
plt.subplot(121)
plt.imshow(pred_show, 'gray')
plt.subplot(122)
plt.imshow(label_show, 'gray')
plt.show()
plt.close()
plt.pause(0.1)
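# The keys train_model() reads from train_config, gathered here as a hypothetical
# template; every value below is a placeholder assumption, not the project's
# actual setting.
EXAMPLE_TRAIN_CONFIG = {
    'model_save_path': './save/model.pth',    # checkpoint target, written on 'val' phases
    'model_save_epoch': 1,                    # save every N epochs
    'pke_start_epoch': 20,                    # epoch at which PKE learning switches on
    'pke_show_epoch': 5,                      # how often to plot the PKE visualisations
    'pke_show': False,                        # whether to collect visualisations at all
    'pke_show_list': [],                      # label-file stems to visualise
    'is_value_map_save': True,                # persist value maps as PNGs vs. keep them in memory
    'value_map_save_dir': './save/value_maps',
}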
|
class DiceBCELoss(nn.Module):
def __init__(self, weight=None, size_average=True):
super(DiceBCELoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
inputs = inputs.view((- 1))
targets = targets.view((- 1))
intersection = (inputs * targets).sum()
dice_loss = (1 - (((2.0 * intersection) + smooth) / ((inputs.sum() + targets.sum()) + smooth)))
BCE = F.binary_cross_entropy(inputs, targets, reduction='mean')
Dice_BCE = ((0.01 * BCE) + dice_loss)
return Dice_BCE
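# Hypothetical usage of DiceBCELoss: it expects probabilities (already passed
# through a sigmoid) and binary targets of the same shape; the BCE term is
# down-weighted by 0.01 relative to the Dice term.
def _demo_dice_bce():
    import torch
    criterion = DiceBCELoss()
    preds = torch.sigmoid(torch.randn(2, 1, 64, 64))
    targets = (torch.rand(2, 1, 64, 64) > 0.5).float()
    return criterion(preds, targets)  # scalar loss tensor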
|