def main():
parser = argparse.ArgumentParser()
parser.add_argument('--mode', default='glove')
parser.add_argument('--name')
args = parser.parse_args()
train = spider.SpiderDataset(paths=('data/spider-20190205/train_spider.json', 'data/spider-20190205/train_others.json'), tables_paths=('data/spider-20190205/tables.json',), db_path='data/spider-20190205/database')
dev = spider.SpiderDataset(paths=('data/spider-20190205/dev.json',), tables_paths=('data/spider-20190205/tables.json',), db_path='data/spider-20190205/database')
if (args.mode == 'glove'):
embedder = pretrained_embeddings.GloVe((args.name or '42B'))
elif (args.mode == 'bpemb'):
embedder = pretrained_embeddings.BPEmb(dim=100, vocab_size=int((args.name or 10000)))
(t_g_present, t_g_missing) = count_glove(train, embedder)
(d_g_present, d_g_missing) = count_glove(dev, embedder)
import IPython
IPython.embed()
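# count_glove is not defined in this excerpt. A minimal sketch of what it plausibly
# computes, written for illustration only: tally the question tokens covered by the
# pretrained vocabulary versus those missing from it. The `token in embedder`
# membership test is a hypothetical stand-in for whatever the real
# pretrained_embeddings API exposes.
import collections

def count_glove_sketch(dataset, embedder):
    present, missing = collections.Counter(), collections.Counter()
    for item in dataset:
        for token in item.text:
            (present if token in embedder else missing)[token] += 1
    return present, missing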
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', required=True)
parser.add_argument('--config-args')
args = parser.parse_args()
if args.config_args:
config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args}))
else:
config = json.loads(_jsonnet.evaluate_file(args.config))
train_data = registry.construct('dataset', config['data']['train'])
grammar = registry.construct('grammar', config['model']['decoder_preproc']['grammar'])
base_grammar = registry.construct('grammar', config['model']['decoder_preproc']['grammar']['base_grammar'])
for (i, item) in enumerate(tqdm.tqdm(train_data, dynamic_ncols=True)):
parsed = grammar.parse(item.code, 'train')
orig_parsed = base_grammar.parse(item.orig['orig'], 'train')
canonicalized_orig_code = base_grammar.unparse(base_grammar.parse(item.orig['orig'], 'train'), item)
unparsed = grammar.unparse(parsed, item)
if (canonicalized_orig_code != unparsed):
print('Original tree:')
pprint.pprint(orig_parsed)
print('Rewritten tree:')
pprint.pprint(parsed)
print('Reconstructed tree:')
pprint.pprint(grammar._expand_templates(parsed))
print('Original code:')
print(canonicalized_orig_code)
print('Reconstructed code:')
print(unparsed)
import IPython
IPython.embed()
break
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', required=True)
parser.add_argument('--config-args')
parser.add_argument('--output', required=True)
args = parser.parse_args()
if args.config_args:
config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args}))
else:
config = json.loads(_jsonnet.evaluate_file(args.config))
os.makedirs(args.output, exist_ok=True)
gold = open(os.path.join(args.output, 'gold.txt'), 'w')
predicted = open(os.path.join(args.output, 'predicted.txt'), 'w')
train_data = registry.construct('dataset', config['data']['train'])
grammar = registry.construct('grammar', config['model']['decoder_preproc']['grammar'])
evaluator = evaluation.Evaluator('data/spider-20190205/database', evaluation.build_foreign_key_map_from_json('data/spider-20190205/tables.json'), 'match')
for (i, item) in enumerate(tqdm.tqdm(train_data, dynamic_ncols=True)):
parsed = grammar.parse(item.code, 'train')
sql = grammar.unparse(parsed, item)
evaluator.evaluate_one(item.schema.db_id, item.orig['query'].replace('\t', ' '), sql)
gold.write('{}\t{}\n'.format(item.orig['query'].replace('\t', ' '), item.schema.db_id))
predicted.write('{}\n'.format(sql))
|
class IdentitySet(collections.abc.MutableSet):
def __init__(self, iterable=()):
self.map = {id(x): x for x in iterable}
def __contains__(self, value):
return (id(value) in self.map)
def __iter__(self):
return iter(self.map.values())
def __len__(self):
return len(self.map)
def add(self, value):
self.map[id(value)] = value
def discard(self, value):
self.map.pop(id(value), None)
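# Quick usage sketch: IdentitySet keys members by id(), so it can hold unhashable
# objects such as the dict-based AST nodes used by TreeBPE below, and two
# equal-but-distinct dicts are tracked separately.
a, b = {'_type': 'Name'}, {'_type': 'Name'}
s = IdentitySet([a])
assert a in s and b not in s   # membership is by object identity, not equality
s.add(b)
assert len(s) == 2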
|
@attr.s
class TypeInfo():
name = attr.ib()
base_name = attr.ib()
predecessor_name = attr.ib()
predecessor_triple = attr.ib()
unset_fields = attr.ib()
preset_fields = attr.ib()
preset_seq_elem_counts = attr.ib(factory=(lambda : collections.Counter()))
|
@attr.s(frozen=True)
class Primitive():
value = attr.ib()
|
class TreeBPE():
def __init__(self, grammar):
self.grammar = grammar
self.ast_wrapper = grammar.ast_wrapper
self.type_infos = {k: TypeInfo(name=k, base_name=k, predecessor_name=k, predecessor_triple=None, unset_fields=collections.OrderedDict(((field.name, field) for field in v.fields)), preset_fields={}) for (k, v) in self.ast_wrapper.singular_types.items()}
self.type_graph = networkx.DiGraph()
for k in self.ast_wrapper.singular_types:
self.type_graph.add_node(k)
self.created_types = []
self.pre_iteration_counts = []
self.iterations_finished = 0
def run_iteration(self, trees):
(triple_occurrences, node_type_counts) = self.count_triples(trees)
self.pre_iteration_counts.append(node_type_counts)
(most_freq_triple, most_freq_occurrences) = max(triple_occurrences.items(), key=(lambda kv: len(kv[1])))
if (len(most_freq_occurrences) == 1):
raise Exception('No more work to do!')
(existing_type_name, field_name, field_info) = most_freq_triple
tuple_name = (field_name if isinstance(field_name, tuple) else (field_name,))
existing_type = self.type_infos[existing_type_name]
existing_field = existing_type.unset_fields[field_name]
promoted_fields = []
promoted_seq_elem_counts = collections.Counter()
promoted_preset_fields = {}
if (isinstance(field_info, Primitive) or (field_info is None)):
pass
else:
for (is_preset, (field_field_name, field_field)) in itertools.chain(zip(itertools.repeat(False), self.type_infos[field_info].unset_fields.items()), zip(itertools.repeat(True), self.type_infos[field_info].preset_fields.items())):
if isinstance(field_field_name, tuple):
field_field_tuple_name = field_field_name
else:
field_field_tuple_name = (field_field_name,)
if existing_field.seq:
suffix = ((existing_type.preset_seq_elem_counts[tuple_name],) + field_field_tuple_name)
else:
suffix = field_field_tuple_name
new_name = (tuple_name + suffix)
if isinstance(field_field, asdl.Field):
new_field = asdl.Field(type=field_field.type, name=new_name, seq=field_field.seq, opt=field_field.opt)
else:
new_field = field_field
if is_preset:
promoted_preset_fields[new_name] = new_field
else:
promoted_fields.append((field_field, new_field))
seq_elem_count = self.type_infos[field_info].preset_seq_elem_counts[field_field_tuple_name]
if seq_elem_count:
promoted_seq_elem_counts[new_name] = seq_elem_count
new_preset_fields = {**existing_type.preset_fields, **promoted_preset_fields}
new_preset_seq_elem_counts = (existing_type.preset_seq_elem_counts + promoted_seq_elem_counts)
if (existing_field.seq and (field_info is not None)):
new_preset_fields[(tuple_name + (new_preset_seq_elem_counts[tuple_name],))] = field_info
new_preset_seq_elem_counts[tuple_name] += 1
else:
new_preset_fields[tuple_name] = field_info
new_unset_fields = {**{f.name: f for (old_field, f) in promoted_fields}, **existing_type.unset_fields}
if ((field_info is None) or (not existing_field.seq)):
del new_unset_fields[field_name]
new_type = TypeInfo(name='Type{:04d}_{}'.format(self.iterations_finished, existing_type.base_name), base_name=existing_type.base_name, predecessor_name=existing_type.name, predecessor_triple=most_freq_triple, unset_fields=new_unset_fields, preset_fields=new_preset_fields, preset_seq_elem_counts=new_preset_seq_elem_counts)
self.type_infos[new_type.name] = new_type
self.created_types.append(new_type)
self.type_graph.add_edge(new_type.name, existing_type.name)
self.iterations_finished += 1
discarded = IdentitySet()
for occ in most_freq_occurrences:
if (occ in discarded):
continue
occ['_type'] = new_type.name
def delete_obsoleted_field():
if existing_field.seq:
del occ[field_name][0]
if (not occ[field_name]):
del occ[field_name]
else:
del occ[field_name]
if isinstance(field_info, Primitive):
delete_obsoleted_field()
elif (field_info is None):
pass
else:
if existing_field.seq:
value_to_promote = occ[field_name][0]
else:
value_to_promote = occ[field_name]
delete_obsoleted_field()
discarded.add(value_to_promote)
for (old_field, new_field) in promoted_fields:
if (old_field.name not in value_to_promote):
assert (old_field.opt or old_field.seq)
continue
occ[new_field.name] = value_to_promote[old_field.name]
assert occ[new_field.name]
def finish(self, trees):
(_, node_type_counts) = self.count_triples(trees)
self.pre_iteration_counts.append(node_type_counts)
def count_triples(self, trees):
triple_occurrences = collections.defaultdict(list)
node_type_counts = collections.Counter()
for tree in trees:
queue = collections.deque([tree])
while queue:
node = queue.pop()
node_type_counts[node['_type']] += 1
for (field_name, field) in self.type_infos[node['_type']].unset_fields.items():
if (field_name in node):
field_value = node[field_name]
is_primitive = (field.type in self.ast_wrapper.primitive_types)
if field.seq:
relevant_value = field_value[0]
if (not is_primitive):
queue.extend(field_value)
else:
relevant_value = field_value
if (not is_primitive):
queue.append(field_value)
if is_primitive:
field_info = Primitive(relevant_value)
else:
field_info = relevant_value['_type']
else:
assert (field.seq or field.opt)
field_info = None
triple_occurrences[(node['_type'], field_name, field_info)].append(node)
for field_name in self.type_infos[node['_type']].preset_fields:
assert (field_name not in node)
return (triple_occurrences, node_type_counts)
def visualize(self, root_type: TypeInfo):
result = io.StringIO()
def print_type(this_type, parent_lasts, field_prefix):
def print_child(s, last, parent_lasts):
for parent_last in parent_lasts:
if parent_last:
result.write(' ')
else:
result.write('│ ')
if last:
result.write('└─')
else:
result.write('├─')
print(s, file=result)
if parent_lasts:
print_child(this_type.base_name, parent_lasts[(- 1)], parent_lasts[:(- 1)])
else:
print(this_type.base_name, file=result)
fields = self.type_infos[this_type.base_name].unset_fields
for (i, field) in enumerate(fields.values()):
last_field = ((i + 1) == len(fields))
print_child('{} [{}]{}'.format(field.name, field.type, ('?' if field.opt else ('*' if field.seq else ''))), last_field, parent_lasts)
field_path = (field_prefix + (field.name,))
parent_lasts_for_field = (parent_lasts + (last_field,))
if (field.opt and (field_path in root_type.preset_fields) and (root_type.preset_fields[field_path] is None)):
pass
elif field.seq:
if (field_path in root_type.preset_fields):
assert (root_type.preset_fields[field_path] is None)
seq_complete = True
else:
seq_complete = False
preset_count = root_type.preset_seq_elem_counts[field_path]
for i in range(preset_count):
last_seq_elem = (seq_complete and ((i + 1) == preset_count))
seq_elem_path = (field_path + (i,))
field_value = root_type.preset_fields[seq_elem_path]
if isinstance(field_value, Primitive):
print_child(repr(field_value.value), last_seq_elem, parent_lasts_for_field)
else:
print_type(self.type_infos[field_value], (parent_lasts_for_field + (last_seq_elem,)), seq_elem_path)
if (not seq_complete):
print_child('??', True, parent_lasts_for_field)
elif (field_path not in root_type.preset_fields):
print_child('??', True, parent_lasts_for_field)
else:
field_value = root_type.preset_fields[field_path]
if isinstance(field_value, Primitive):
print_child(repr(field_value.value), True, parent_lasts_for_field)
else:
print_type(self.type_infos[field_value], (parent_lasts_for_field + (True,)), field_path)
print_type(root_type, (), ())
return result.getvalue()
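# TreeBPE.count_triples walks dict-encoded ASTs: every node is a dict with a '_type'
# key plus one entry per present field, and sequential fields hold lists of child
# nodes or primitives. A tiny illustrative tree (the node and field names here are
# made up, not taken from a real grammar):
example_tree = {
    '_type': 'Call',
    'func': {'_type': 'Name', 'id': 'foo'},   # singular child field
    'args': [                                 # sequential field
        {'_type': 'Num', 'n': 1},
        {'_type': 'Num', 'n': 2},
    ],
}
# With a grammar defining these fields, count_triples would record triples such as
# ('Call', 'func', 'Name') and ('Call', 'args', 'Num'): the node type, the field
# name, and either the first child's type or a Primitive(value) wrapper when the
# field holds a primitive.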
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', required=True)
parser.add_argument('--config-args')
parser.add_argument('--section', default='train')
parser.add_argument('--num-iters', type=int, default=100)
parser.add_argument('--vis-out')
args = parser.parse_args()
if args.config_args:
config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args}))
else:
config = json.loads(_jsonnet.evaluate_file(args.config))
model_preproc = registry.instantiate(registry.lookup('model', config['model']).Preproc, config['model'])
model_preproc.load()
preproc_data = model_preproc.dataset(args.section)
all_trees = [dec.tree for (enc, dec) in preproc_data]
tree_bpe = TreeBPE(model_preproc.dec_preproc.grammar)
for i in tqdm.tqdm(range(args.num_iters), dynamic_ncols=True):
tree_bpe.run_iteration(all_trees)
tree_bpe.finish(all_trees)
print('Finished')
if args.vis_out:
f = open(args.vis_out, 'w')
f.write("# Documentation\n#\n# Idiom trees are printed like this:\n# NodeType\n# ├─field1 [field1_type]\n# ├─field2 [field2_type]?\n# └─field3 [field3_type]*\n# ? indicates the field is optional.\n# * indicates the field is sequential.\n#\n# If a field has a known primitive value, it is written like this:\n# └─field3 [str]\n# └─'value'\n#\n# If a field has a known type for its value, it is written like this:\n# └─field3 [field3_type]\n# └─Field3NodeType\n# └─...\n#\n# If a field:\n# - does not have a known value, or\n# - is sequential and the idiom allows for further entries at the end\n# it is written like this:\n# └─field3 [field3_type]\n# └─??\n# \n# If a field:\n# - is optional and known to lack a value, or\n# - is sequential and the idiom does not allow for further entries at the end\n# then there is no ??.\n\nInitial node type frequency:\n")
for (k, v) in tree_bpe.pre_iteration_counts[0].most_common():
print('- {}: {}'.format(k, v), file=f)
print(file=f)
for (i, type_info) in enumerate(tree_bpe.created_types):
print('# Idiom {} [{}]'.format(i, type_info.name), file=f)
print('# Descended from {} by setting {} to {}'.format(*type_info.predecessor_triple), file=f)
print('# Frequency at creation: {}'.format(tree_bpe.pre_iteration_counts[(i + 1)][type_info.name]), file=f)
print(tree_bpe.visualize(type_info), file=f)
f.close()
else:
import IPython
IPython.embed()
|
class ASTWrapperVisitor(asdl.VisitorBase):
'Used by ASTWrapper to collect information.\n\n - put constructors in one place.\n - checks that all fields have names.\n - get all optional fields.\n '
def __init__(self):
super(ASTWrapperVisitor, self).__init__()
self.constructors = {}
self.sum_types = {}
self.product_types = {}
self.fieldless_constructors = {}
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type_):
self.visit(type_.value, str(type_.name))
def visitSum(self, sum_, name):
self.sum_types[name] = sum_
for t in sum_.types:
self.visit(t, name)
def visitConstructor(self, cons, _name):
assert (cons.name not in self.constructors)
self.constructors[cons.name] = cons
if (not cons.fields):
self.fieldless_constructors[cons.name] = cons
for f in cons.fields:
self.visit(f, cons.name)
def visitField(self, field, name):
if (field.name is None):
raise ValueError('Field of type {} in {} lacks name'.format(field.type, name))
def visitProduct(self, prod, name):
self.product_types[name] = prod
for f in prod.fields:
self.visit(f, name)
|
class FilterType():
def __init__(self, typ):
self.typ = typ
def __call__(self, x):
return isinstance(x, self.typ)
|
def is_singleton(x):
return ((x is True) or (x is False) or (x is None))
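# Quick demonstration of the two primitive-type checkers above: FilterType wraps an
# isinstance test in a reusable callable, and is_singleton accepts exactly the
# singletons True, False and None (not 0 or '').
check_int = FilterType(int)
assert check_int(3) and not check_int('3')
assert is_singleton(None) and is_singleton(True) and not is_singleton(0)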
|
class ASTWrapper(object):
'Provides helper methods on the ASDL AST.'
default_primitive_type_checkers = {'identifier': FilterType(str), 'int': FilterType(int), 'string': FilterType(str), 'bytes': FilterType(bytes), 'object': FilterType(object), 'singleton': is_singleton}
def __init__(self, ast_def, custom_primitive_type_checkers={}):
self.ast_def = ast_def
visitor = ASTWrapperVisitor()
visitor.visit(ast_def)
self.constructors = visitor.constructors
self.sum_types = visitor.sum_types
self.product_types = visitor.product_types
self.seq_fragment_constructors = {}
self.primitive_type_checkers = {**self.default_primitive_type_checkers, **custom_primitive_type_checkers}
self.custom_primitive_types = set(custom_primitive_type_checkers.keys())
self.primitive_types = set(self.primitive_type_checkers.keys())
self.singular_types = {}
self.singular_types.update(self.constructors)
self.singular_types.update(self.product_types)
self.sum_type_vocabs = {name: sorted((t.name for t in sum_type.types)) for (name, sum_type) in self.sum_types.items()}
self.constructor_to_sum_type = {constructor.name: name for (name, sum_type) in self.sum_types.items() for constructor in sum_type.types}
self.seq_fragment_constructor_to_sum_type = {constructor.name: name for (name, sum_type) in self.sum_types.items() for constructor in sum_type.types}
self.fieldless_constructors = sorted(visitor.fieldless_constructors.keys())
@property
def types(self):
return self.ast_def.types
@property
def root_type(self):
return self._root_type
def add_sum_type(self, name, sum_type):
assert (name not in self.sum_types)
self.sum_types[name] = sum_type
self.types[name] = sum_type
for type_ in sum_type.types:
self._add_constructor(name, type_)
def add_constructors_to_sum_type(self, sum_type_name, constructors):
for constructor in constructors:
self._add_constructor(sum_type_name, constructor)
self.sum_types[sum_type_name].types += constructors
def remove_product_type(self, product_type_name):
self.singular_types.pop(product_type_name)
self.product_types.pop(product_type_name)
self.types.pop(product_type_name)
def add_seq_fragment_type(self, sum_type_name, constructors):
for constructor in constructors:
self._add_constructor(sum_type_name, constructor)
sum_type = self.sum_types[sum_type_name]
if (not hasattr(sum_type, 'seq_fragment_types')):
sum_type.seq_fragment_types = []
sum_type.seq_fragment_types += constructors
def _add_constructor(self, sum_type_name, constructor):
assert (constructor.name not in self.constructors)
self.constructors[constructor.name] = constructor
assert (constructor.name not in self.singular_types)
self.singular_types[constructor.name] = constructor
assert (constructor.name not in self.constructor_to_sum_type)
self.constructor_to_sum_type[constructor.name] = sum_type_name
if (not constructor.fields):
self.fieldless_constructors.append(constructor.name)
self.fieldless_constructors.sort()
def verify_ast(self, node, expected_type=None, field_path=(), is_seq=False):
'Checks that `node` conforms to the current ASDL.'
if (node is None):
raise ValueError('node is None. path: {}'.format(field_path))
if (not isinstance(node, dict)):
raise ValueError('node is type {}. path: {}'.format(type(node), field_path))
node_type = node['_type']
if (expected_type is not None):
sum_product = self.types[expected_type]
if isinstance(sum_product, asdl.Product):
if (node_type != expected_type):
raise ValueError('Expected type {}, but instead saw {}. path: {}'.format(expected_type, node_type, field_path))
elif isinstance(sum_product, asdl.Sum):
possible_names = [t.name for t in sum_product.types]
if is_seq:
possible_names += [t.name for t in getattr(sum_product, 'seq_fragment_types', [])]
if (node_type not in possible_names):
raise ValueError('Expected one of {}, but instead saw {}. path: {}'.format(', '.join(possible_names), node_type, field_path))
else:
raise ValueError('Unexpected type in ASDL: {}'.format(sum_product))
if (node_type in self.types):
sum_product = self.types[node_type]
if isinstance(sum_product, asdl.Sum):
raise ValueError('sum type {} not allowed as node type. path: {}'.format(node_type, field_path))
fields_to_check = sum_product.fields
elif (node_type in self.constructors):
fields_to_check = self.constructors[node_type].fields
else:
raise ValueError('Unknown node_type {}. path: {}'.format(node_type, field_path))
for field in fields_to_check:
if (field.name not in node):
if (field.opt or field.seq):
continue
raise ValueError('required field {} is missing. path: {}'.format(field.name, field_path))
if (field.seq and (field.name in node) and (not isinstance(node[field.name], (list, tuple)))):
raise ValueError('sequential field {} is not sequence. path: {}'.format(field.name, field_path))
items = (node.get(field.name, ()) if field.seq else (node.get(field.name),))
if (field.type in self.primitive_type_checkers):
check = self.primitive_type_checkers[field.type]
else:
check = (lambda n: self.verify_ast(n, field.type, (field_path + (field.name,)), is_seq=field.seq))
for item in items:
assert check(item)
return True
def find_all_descendants_of_type(self, tree, type, descend_pred=(lambda field: True)):
queue = [tree]
while queue:
node = queue.pop()
if (not isinstance(node, dict)):
continue
for field_info in self.singular_types[node['_type']].fields:
if (field_info.opt and (field_info.name not in node)):
continue
if (not descend_pred(field_info)):
continue
if field_info.seq:
values = node.get(field_info.name, [])
else:
values = [node[field_info.name]]
if (field_info.type == type):
for value in values:
(yield value)
else:
queue.extend(values)
|
@attr.s
class HoleValuePlaceholder():
id = attr.ib()
field_name = attr.ib()
type = attr.ib()
is_seq = attr.ib()
is_opt = attr.ib()
|
@attr.s
class Hypothesis():
inference_state = attr.ib()
next_choices = attr.ib()
score = attr.ib(default=0)
choice_history = attr.ib(factory=list)
score_history = attr.ib(factory=list)
|
def beam_search(model, orig_item, preproc_item, beam_size, max_steps, visualize_flag=False):
(inference_state, next_choices) = model.begin_inference(orig_item, preproc_item)
beam = [Hypothesis(inference_state, next_choices)]
finished = []
for step in range(max_steps):
if visualize_flag:
print('step:')
print(step)
if (len(finished) == beam_size):
break
candidates = []
for hyp in beam:
candidates += [(hyp, choice, choice_score.item(), (hyp.score + choice_score.item())) for (choice, choice_score) in hyp.next_choices]
candidates.sort(key=operator.itemgetter(3), reverse=True)
candidates = candidates[:(beam_size - len(finished))]
beam = []
for (hyp, choice, choice_score, cum_score) in candidates:
inference_state = hyp.inference_state.clone()
next_choices = inference_state.step(choice)
if (next_choices is None):
finished.append(Hypothesis(inference_state, None, cum_score, (hyp.choice_history + [choice]), (hyp.score_history + [choice_score])))
else:
beam.append(Hypothesis(inference_state, next_choices, cum_score, (hyp.choice_history + [choice]), (hyp.score_history + [choice_score])))
finished.sort(key=operator.attrgetter('score'), reverse=True)
return finished
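# A minimal sketch of how beam_search drives a model. The model only has to expose
# begin_inference() -> (state, choices) and a state with clone()/step(); scores just
# need an .item() method. ToyState and ToyModel are hypothetical stand-ins written
# for illustration, not part of this codebase.
import torch

class ToyState:
    def __init__(self, prefix=()):
        self.prefix = prefix
    def clone(self):
        return ToyState(self.prefix)
    def step(self, choice):
        self.prefix = self.prefix + (choice,)
        if len(self.prefix) == 2:      # stop after two decisions
            return None
        return [('a', torch.tensor(-0.1)), ('b', torch.tensor(-2.3))]

class ToyModel:
    def begin_inference(self, orig_item, preproc_item):
        return ToyState(), [('a', torch.tensor(-0.1)), ('b', torch.tensor(-2.3))]

hyps = beam_search(ToyModel(), orig_item=None, preproc_item=None, beam_size=2, max_steps=10)
# Highest-scoring hypothesis first, e.g. (['a', 'a'], -0.2) up to float rounding.
print([(h.choice_history, round(h.score, 4)) for h in hyps])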
|
class Barrier(object):
def __init__(self, parties, action=(lambda : None)):
self._parties = parties
self._action = action
self._cond = asyncio.Condition()
self._count = 0
async def wait(self):
self._count += 1
async with self._cond:
if self._maybe_release():
return
(await self._cond.wait())
async def deregister(self):
self._parties -= 1
async with self._cond:
self._maybe_release()
@property
def empty(self):
return (self._parties == 0)
@property
def n_waiting(self):
return self._count
@property
def parties(self):
return self._parties
def _maybe_release(self):
if (self._count == self._parties):
self._cond.notify_all()
self._count = 0
self._action()
return True
return False
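# A small usage sketch for the Barrier above: three tasks rendezvous, and the action
# runs once when the last one arrives (TorchBatcher below uses the same pattern to
# trigger batched computation).
b = Barrier(3, action=lambda: print('all three arrived'))

async def worker(i):
    await b.wait()
    return i

async def demo():
    return await asyncio.gather(*(worker(i) for i in range(3)))

print(asyncio.run(demo()))   # prints 'all three arrived', then [0, 1, 2]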
|
@attr.s
class ResultHandle(object):
coro = attr.ib()
node = attr.ib()
all_results = attr.ib()
accessor = attr.ib(default=(lambda x: x))
def __await__(self):
result = self.all_results.get(self.node)
if (result is None):
(yield from self.coro().__await__())
result = self.all_results[self.node]
return self.accessor(result)
def with_shape(self, *shape):
copied = copy.copy(self)
copied.shape = shape
return copied
def split(self, num_splits):
result = []
for i in range(num_splits):
copied = copy.copy(self)
copied.accessor = (lambda x, i=i: self.accessor(x)[i])
result.append(copied)
return tuple(result)
|
@attr.s(frozen=True, cmp=False, hash=False)
class BatchKey(object):
callable = attr.ib()
args = attr.ib()
kwargs = attr.ib()
def __attrs_post_init__(self):
if isinstance(self.callable, functools.partial):
callable_exp = (self.callable.func, self.callable.args, tuple(((k, v) for (k, v) in sorted(self.callable.keywords.items()))))
else:
callable_exp = (self.callable, (), ())
self.__dict__['_callable_exp'] = callable_exp
self.__dict__['_hash'] = hash((callable_exp, self.args, self.kwargs))
def __eq__(self, other):
if (not isinstance(other, BatchKey)):
return False
return ((self._callable_exp == other._callable_exp) and (self.args == other.args) and (self.kwargs == other.kwargs))
def __hash__(self):
return self._hash
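# BatchKey makes two logically identical calls hash and compare equal even when they
# are wrapped in separately constructed functools.partial objects, so the batcher can
# group them into one batch. A small check (shapes stand in for the args, mirroring
# _batch_key below):
import functools, torch
k1 = BatchKey(functools.partial(torch.nn.functional.softmax, dim=-1), ((8,),), ())
k2 = BatchKey(functools.partial(torch.nn.functional.softmax, dim=-1), ((8,),), ())
assert k1 == k2 and hash(k1) == hash(k2) and len({k1, k2}) == 1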
|
@attr.s(cmp=False)
class Node(object):
args = attr.ib()
kwargs = attr.ib()
batch_key = attr.ib()
depth = attr.ib(default=0)
outgoing = attr.ib(default=attr.Factory(list))
num_incoming = attr.ib(default=0)
|
class StreamingMean(object):
def __init__(self):
self.value = None
self.count = 0.0
def add(self, value):
if (not self.count):
self.value = value
else:
self.value *= (self.count / (self.count + 1))
self.value += (value / (self.count + 1))
self.count += 1
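# StreamingMean keeps a running average without storing the values; it matches the
# batch mean at every step (the property test_streaming_mean below checks against
# numpy). A dependency-free sanity check:
m = StreamingMean()
for x in [10, 11, 12, 13]:
    m.add(x)
assert abs(m.value - 11.5) < 1e-9 and m.count == 4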
|
class TorchBatcher(object):
def __init__(self):
self.barrier = None
self._reset()
def _reset(self):
self.enqueued_nodes = []
self.results = {}
self.mean_depth_by_key = collections.defaultdict(StreamingMean)
def __call__(self, callable, *args, **kwargs):
batch_key = self._batch_key(callable, *args, **kwargs)
node = Node(args, kwargs, batch_key)
for arg in itertools.chain(args, kwargs.values()):
if isinstance(arg, ResultHandle):
node.num_incoming += 1
node.depth = max(node.depth, (arg.node.depth + 1))
arg.node.outgoing.append(node)
self.enqueued_nodes.append(node)
self.mean_depth_by_key[batch_key].add(node.depth)
coro = ResultHandle(self.barrier.wait, node, self.results)
return coro
async def _wrap_deregister(self, coroutine):
result = (await coroutine)
(await self.barrier.deregister())
return result
def run(self, coroutines):
self.barrier = barrier.Barrier(len(coroutines), self._compute)
loop = asyncio.get_event_loop()
return loop.run_until_complete(asyncio.gather(*(self._wrap_deregister(c) for c in coroutines)))
def _compute(self):
agenda = collections.defaultdict(list)
while (self.enqueued_nodes or agenda):
remaining_nodes = []
while self.enqueued_nodes:
node = self.enqueued_nodes.pop()
if (node.num_incoming == 0):
agenda[node.batch_key].append(node)
else:
remaining_nodes.append(node)
self.enqueued_nodes = remaining_nodes
batch_key = min(agenda, key=(lambda k: self.mean_depth_by_key[k].value))
nodes = agenda[batch_key]
args = [self._stack([self._to_value(node.args[i]) for node in nodes]) for i in range(len(batch_key.args))]
kwargs = {k: self._stack([self._to_value(node.kwargs[k]) for node in nodes]) for (k, shape) in batch_key.kwargs}
results = self._unstack(batch_key.callable(*args, **kwargs))
for (node, result) in zip(nodes, results):
self.results[node] = result
for next_node in node.outgoing:
next_node.num_incoming -= 1
del agenda[batch_key]
self._reset()
def _batch_key(self, callable, *args, **kwargs):
return BatchKey(callable, tuple((self._batch_key_single(arg) for arg in args)), tuple(((k, self._batch_key_single(v)) for (k, v) in sorted(kwargs.items()))))
def _to_value(self, handle_or_value):
if isinstance(handle_or_value, ResultHandle):
return handle_or_value.accessor(self.results[handle_or_value.node])
return handle_or_value
def _stack(self, items):
return torch.stack(items)
def _unstack(self, stacked):
if isinstance(stacked, tuple):
return zip(*([piece.squeeze(0) for piece in stacked_elem.split(1)] for stacked_elem in stacked))
return [piece.squeeze(0) for piece in stacked.split(1)]
def _batch_key_single(self, arg):
return arg.shape
|
class TorchNoOpBatcher(TorchBatcher):
async def __call__(self, callable, *args, **kwargs):
args = [self._noop_stack(arg) for arg in args]
kwargs = {k: self._noop_stack(arg) for (k, arg) in kwargs.items()}
return self._noop_unstack(callable(*args, **kwargs))
def _noop_stack(self, item):
return torch.unsqueeze(item, 0)
def _noop_unstack(self, stacked):
return torch.squeeze(stacked, 0)
|
def test_streaming_mean():
m = batcher.StreamingMean()
values = list(range(10, 20))
for (i, value) in enumerate(values):
m.add(value)
assert (m.value == np.mean(values[:(i + 1)]))
|
def test_simple_linear():
batch_size = 32
linear = unittest.mock.Mock(wraps=torch.nn.Linear(8, 16))
inp = torch.autograd.Variable(torch.rand(batch_size, 8))
batcher = torch_batcher.TorchBatcher()
async def process(item):
result = (await batcher(linear, item))
return result
results = batcher.run([process(inp[i]) for i in range(batch_size)])
assert (linear.call_count == 1)
for i in range(batch_size):
single_result = linear(inp[i:(i + 1)]).squeeze(0)
assert (results[i].data.numpy() == pytest.approx(single_result.data.numpy(), abs=1e-06))
|
def test_simple_linear_defer():
batch_size = 4
linear = unittest.mock.Mock(wraps=torch.nn.Linear(8, 16))
inp = torch.autograd.Variable(torch.rand(batch_size, 8))
batcher = torch_batcher.TorchBatcher()
async def process(item1, item2):
result1 = batcher(linear, item1)
result2 = batcher(linear, item2)
return ((await result1), (await result2))
results = batcher.run([process(inp[i], inp[(i + 1)]) for i in range(0, batch_size, 2)])
assert (linear.call_count == 1)
results = [r for pair in results for r in pair]
for i in range(batch_size):
single_result = linear(inp[i:(i + 1)]).squeeze(0)
assert (results[i].data.numpy() == pytest.approx(single_result.data.numpy(), abs=1e-06))
|
def test_multi_stage():
batch_size = 32
linear = unittest.mock.Mock(wraps=torch.nn.Linear(8, 8))
inp = torch.autograd.Variable(torch.rand(batch_size, 8))
batcher = torch_batcher.TorchBatcher()
async def process(item, iters):
for iter in range(iters):
item = (await batcher(linear, item))
return item
results = batcher.run([process(inp[i], ((i + 1) // 4)) for i in range(batch_size)])
assert (linear.call_count == (32 // 4))
for i in range(batch_size):
row = inp[i:(i + 1)]
for iter in range(((i + 1) // 4)):
row = linear(row)
single_result = row.squeeze(0)
assert (results[i].data.numpy() == pytest.approx(single_result.data.numpy(), abs=1e-06))
|
def test_multi_stage_deferred():
batch_size = 32
linear = unittest.mock.Mock(wraps=torch.nn.Linear(8, 8))
inp = torch.autograd.Variable(torch.rand(batch_size, 8))
batcher = torch_batcher.TorchBatcher()
async def process(item, iters):
for iter in range(iters):
item = batcher(linear, item).with_shape(8)
if iters:
return (await item)
return item
results = batcher.run([process(inp[i], ((i + 1) // 4)) for i in range(batch_size)])
assert (linear.call_count == (32 // 4))
for i in range(batch_size):
row = inp[i:(i + 1)]
for iter in range(((i + 1) // 4)):
row = linear(row)
single_result = row.squeeze(0)
assert (results[i].data.numpy() == pytest.approx(single_result.data.numpy(), abs=1e-06))
|
def test_multi_stage_and_modules():
batch_size = 32
linears = [unittest.mock.Mock(wraps=torch.nn.Linear(8, 8)) for _ in range(5)]
inp = torch.autograd.Variable(torch.rand(batch_size, 8))
batcher = torch_batcher.TorchBatcher()
async def process(i, item, iters):
for iter in range(iters):
item = (await batcher(linears[((i + iter) % len(linears))], item))
return item
results = batcher.run([process(i, inp[i], ((i + 1) // 4)) for i in range(batch_size)])
for i in range(len(linears)):
assert (linears[i].call_count <= 8)
for i in range(batch_size):
row = inp[i:(i + 1)]
for iter in range(((i + 1) // 4)):
row = linears[((i + iter) % len(linears))](row)
single_result = row.squeeze(0)
assert (results[i].data.numpy() == pytest.approx(single_result.data.numpy(), abs=1e-06))
|
def test_multi_args():
batch_size = 32
add = unittest.mock.Mock(wraps=torch.nn.Bilinear(1, 1, 1))
inp = torch.autograd.Variable(torch.arange(batch_size).view((- 1), 1))
batcher = torch_batcher.TorchBatcher()
async def process(item, iters):
for iter in range(iters):
item = (await batcher(add, item, input2=item))
return item
results = batcher.run([process(inp[i], ((i + 1) // 8)) for i in range(batch_size)])
assert (add.call_count == 4)
for i in range(batch_size):
row = inp[i:(i + 1)]
for iter in range(((i + 1) // 8)):
row = add(row, row)
single_result = row.squeeze(0)
assert (results[i].data.numpy() == pytest.approx(single_result.data.numpy(), abs=1e-06))
|
def test_multi_args_deferred():
batch_size = 32
add = unittest.mock.Mock(wraps=torch.nn.Bilinear(1, 1, 1))
inp = torch.autograd.Variable(torch.arange(batch_size).view((- 1), 1))
batcher = torch_batcher.TorchBatcher()
async def process(item, iters):
for iter in range(iters):
item = batcher(add, item, input2=item).with_shape(1)
if iters:
return (await item)
else:
return item
results = batcher.run([process(inp[i], ((i + 1) // 8)) for i in range(batch_size)])
assert (add.call_count == 4)
for i in range(batch_size):
row = inp[i:(i + 1)]
for iter in range(((i + 1) // 8)):
row = add(row, row)
single_result = row.squeeze(0)
assert (results[i].data.numpy() == pytest.approx(single_result.data.numpy(), abs=1e-06))
|
def test_multi_args_mixed_deferred():
batch_size = 6
add = unittest.mock.Mock(wraps=torch.nn.Bilinear(1, 1, 1))
inp = torch.autograd.Variable(torch.arange(batch_size).view((- 1), 1))
double = (lambda x: (x * 2))
sum = (lambda *args: torch.sum(torch.cat(args, dim=1), dim=1))
batcher = torch_batcher.TorchBatcher()
async def process(item, iters):
to_sum = []
for iter in range(iters):
item = batcher(add, item, input2=item).with_shape(1)
to_sum.append(batcher(double, item).with_shape(1))
item = (await item)
return (item, (await batcher(sum, *to_sum)))
num_iters = [1, 1, 2, 2, 3, 3]
results = batcher.run([process(inp[i], num_iters[i]) for i in range(batch_size)])
assert (add.call_count == 3)
for i in range(batch_size):
row = inp[i:(i + 1)]
to_sum = []
for iter in range(num_iters[i]):
row = add(row, row)
to_sum.append(double(row))
single_result1 = row.squeeze(0)
single_result2 = sum(*to_sum)
assert (results[i][0].data.numpy() == pytest.approx(single_result1.data.numpy(), abs=1e-06))
assert (results[i][1].data.numpy() == pytest.approx(single_result2.data.numpy(), abs=1e-06))
|
def test_multi_shape():
sizes = [((i // 4) + 1) for i in range(32)]
random.seed(32)
random.shuffle(sizes)
inps = []
for size in sizes:
inps.append(torch.rand(size))
with unittest.mock.patch('torch.exp', wraps=torch.exp) as mock:
batcher = torch_batcher.TorchBatcher()
async def process(item):
return (await batcher(torch.exp, item))
results = batcher.run([process(inp) for inp in inps])
assert (mock.call_count == 8)
for (inp, result) in zip(inps, results):
assert (torch.exp(inp).numpy() == pytest.approx(result.numpy()))
|
def test_partial_softmax():
import functools
batch_size = 32
inp = torch.autograd.Variable(torch.rand(batch_size, 8))
torch_softmax = functools.partial(unittest.mock.Mock(wraps=torch.nn.functional.softmax), dim=(- 1))
batcher = torch_batcher.TorchBatcher()
async def process(item):
return (await batcher(torch_softmax, item))
results = batcher.run([process(inp[i]) for i in range(batch_size)])
assert (torch_softmax.func.call_count == 1)
for i in range(batch_size):
single_result = torch_softmax(inp[i:(i + 1)]).squeeze(0)
assert (results[i].data.numpy() == pytest.approx(single_result.data.numpy(), abs=1e-06))
|
def test_partial_max():
import functools
batch_size = 3
inp = torch.autograd.Variable(torch.rand(batch_size, 8))
torch_max = functools.partial(unittest.mock.Mock(wraps=torch.max), dim=(- 1))
torch_get = (lambda x, i: x[(range(x.shape[0]), i.view((- 1)))])
double = (lambda x: (x * 2))
batcher = torch_batcher.TorchBatcher()
async def process(item):
(max_value, max_idx) = batcher(torch_max, item).split(2)
max_idx = max_idx.with_shape(1)
doubled_idx = batcher(double, max_idx)
max_value2 = batcher(torch_get, item, max_idx)
max_value = (await max_value)
max_idx = (await max_idx)
doubled_idx = (await doubled_idx)
max_value2 = (await max_value2)
assert (max_value.data[0] == max_value2.data[0])
(max_value3, _) = batcher(torch_max, item).split(2)
max_value3 = (await max_value3)
assert (max_value.data[0] == max_value3.data[0])
return (max_value, max_idx, doubled_idx)
results = batcher.run([process(inp[i]) for i in range(batch_size)])
assert (torch_max.func.call_count == 2)
for i in range(batch_size):
(max_value, max_idx) = torch_max(inp[i])
doubled_idx = double(max_idx)
assert (results[i][0].data.numpy() == pytest.approx(max_value.data.numpy(), abs=1e-06))
assert (results[i][1].data.numpy() == pytest.approx(max_idx.data.numpy(), abs=1e-06))
assert (results[i][2].data.numpy() == pytest.approx(doubled_idx.data.numpy(), abs=1e-06))
|
@attr.s
class DjangoItem():
text = attr.ib()
code = attr.ib()
str_map = attr.ib()
|
@registry.register('dataset', 'django')
class DjangoDataset(torch.utils.data.Dataset):
def __init__(self, path, limit=None):
self.path = path
self.examples = []
for line in itertools.islice(open(self.path), limit):
example = json.loads(line)
self.examples.append(DjangoItem(text=example['text']['tokens'], code=example['orig'], str_map=example['text']['str_map']))
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
@attr.s
class Metrics():
dataset = attr.ib()
exact_match = attr.ib(factory=list)
def add(self, item, inferred_code, obsolete_gold_code=None):
if (obsolete_gold_code is None):
try:
gold_code = astor.to_source(ast.parse(item.code))
except ParseError:
return
else:
gold_code = obsolete_gold_code
exact_match = (gold_code == inferred_code)
self.exact_match.append(exact_match)
def finalize(self):
return collections.OrderedDict((('exact match', (sum(self.exact_match) / len(self.exact_match))),))
|
@attr.s
class HearthstoneItem():
text = attr.ib()
code = attr.ib()
|
@registry.register('dataset', 'hearthstone')
class HearthstoneDataset(torch.utils.data.Dataset):
def __init__(self, path, limit=None):
self.path = path
self.examples = []
for example in itertools.islice(zip(open((self.path + '.in')), open((self.path + '.out'))), limit):
processed = self._process(example)
if (processed is not None):
self.examples.append(processed)
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
def _process(self, example):
(text, code) = example
code = '\n'.join(code.split(LINE_SEP))
text = re.sub('<.*?>', '', text)
orig_tokens = nltk.word_tokenize(text)
tokens = ['<name>']
for token in orig_tokens:
if (token == 'NAME_END'):
tokens += ['</name>', '<atk>']
elif (token == 'ATK_END'):
tokens += ['</atk>', '<def>']
elif (token == 'DEF_END'):
tokens += ['</def>', '<cost>']
elif (token == 'COST_END'):
tokens += ['</cost>', '<dur>']
elif (token == 'DUR_END'):
tokens += ['</dur>', '<type>']
elif (token == 'TYPE_END'):
tokens += ['</type>', '<player-cls>']
elif (token == 'PLAYER_CLS_END'):
tokens += ['</player-cls>', '<race>']
elif (token == 'RACE_END'):
tokens += ['</race>', '<rarity>']
elif (token == 'RARITY_END'):
tokens += ['</rarity>']
else:
tokens += [token]
return HearthstoneItem(text=tokens, code=code)
@attr.s
class Metrics():
dataset = attr.ib()
exact_match = attr.ib(factory=list)
sentence_bleu_scores = attr.ib(factory=list)
gold_codes = attr.ib(factory=list)
inferred_codes = attr.ib(factory=list)
smoothing_function = nltk.translate.bleu_score.SmoothingFunction().method3
def add(self, item, inferred_code, obsolete_gold_code=None):
if (obsolete_gold_code is None):
try:
gold_code = astor.to_source(ast.parse(item.code))
except ParseError:
return
else:
gold_code = obsolete_gold_code
if (inferred_code is None):
inferred_code = ''
exact_match = (gold_code == inferred_code)
gold_tokens_for_bleu = tokenize_for_bleu_eval(gold_code)
inferred_tokens_for_bleu = tokenize_for_bleu_eval(inferred_code)
ngram_weights = ([0.25] * min(4, len(inferred_tokens_for_bleu)))
bleu_score = nltk.translate.bleu_score.sentence_bleu((gold_tokens_for_bleu,), inferred_tokens_for_bleu, weights=ngram_weights, smoothing_function=self.smoothing_function)
self.exact_match.append(exact_match)
self.sentence_bleu_scores.append(bleu_score)
self.gold_codes.append((gold_tokens_for_bleu,))
self.inferred_codes.append(inferred_tokens_for_bleu)
def finalize(self):
return collections.OrderedDict((('exact match', (sum(self.exact_match) / len(self.exact_match))), ('sentence BLEU', (sum(self.sentence_bleu_scores) / len(self.sentence_bleu_scores))), ('corpus BLEU', nltk.translate.bleu_score.corpus_bleu(self.gold_codes, self.inferred_codes, smoothing_function=self.smoothing_function))))
|
def tokenize_for_bleu_eval(code):
code = re.sub('([^A-Za-z0-9_])', ' \\1 ', code)
code = re.sub('([a-z])([A-Z])', '\\1 \\2', code)
code = re.sub('\\s+', ' ', code)
code = code.replace('"', '`')
code = code.replace("'", '`')
tokens = [t for t in code.split(' ') if t]
return tokens
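# Example of the BLEU tokenization above: punctuation is split out, camelCase is
# separated, and both quote characters are normalized to backticks.
assert tokenize_for_bleu_eval("fooBar = obj.getValue('x')") == [
    'foo', 'Bar', '=', 'obj', '.', 'get', 'Value', '(', '`', 'x', '`', ')']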
|
@attr.s
class IdiomAstItem():
text = attr.ib()
code = attr.ib()
orig = attr.ib()
str_map = attr.ib()
|
@registry.register('dataset', 'idiom_ast')
class IdiomAstDataset(torch.utils.data.Dataset):
def __init__(self, path, limit=None):
self.path = path
self.examples = []
for line in itertools.islice(open(self.path), limit):
example = json.loads(line)
if isinstance(example['text'], dict):
self.examples.append(IdiomAstItem(text=example['text']['tokens'], code=example['rewritten_ast'], orig=example, str_map=example['text']['str_map']))
else:
self.examples.append(IdiomAstItem(text=example['text'], code=example['rewritten_ast'], orig=example, str_map=None))
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
|
@attr.s
class SpiderItem():
text = attr.ib()
code = attr.ib()
schema = attr.ib()
orig = attr.ib()
orig_schema = attr.ib()
|
@attr.s
class Column():
id = attr.ib()
table = attr.ib()
name = attr.ib()
unsplit_name = attr.ib()
orig_name = attr.ib()
type = attr.ib()
foreign_key_for = attr.ib(default=None)
|
@attr.s
class Table():
id = attr.ib()
name = attr.ib()
unsplit_name = attr.ib()
orig_name = attr.ib()
columns = attr.ib(factory=list)
primary_keys = attr.ib(factory=list)
|
@attr.s
class Schema():
db_id = attr.ib()
tables = attr.ib()
columns = attr.ib()
foreign_key_graph = attr.ib()
orig = attr.ib()
|
def load_tables(paths):
schemas = {}
eval_foreign_key_maps = {}
for path in paths:
schema_dicts = json.load(open(path))
for schema_dict in schema_dicts:
tables = tuple((Table(id=i, name=name.split(), unsplit_name=name, orig_name=orig_name) for (i, (name, orig_name)) in enumerate(zip(schema_dict['table_names'], schema_dict['table_names_original']))))
columns = tuple((Column(id=i, table=(tables[table_id] if (table_id >= 0) else None), name=col_name.split(), unsplit_name=col_name, orig_name=orig_col_name, type=col_type) for (i, ((table_id, col_name), (_, orig_col_name), col_type)) in enumerate(zip(schema_dict['column_names'], schema_dict['column_names_original'], schema_dict['column_types']))))
for column in columns:
if column.table:
column.table.columns.append(column)
for column_id in schema_dict['primary_keys']:
column = columns[column_id]
column.table.primary_keys.append(column)
foreign_key_graph = nx.DiGraph()
for (source_column_id, dest_column_id) in schema_dict['foreign_keys']:
source_column = columns[source_column_id]
dest_column = columns[dest_column_id]
source_column.foreign_key_for = dest_column
foreign_key_graph.add_edge(source_column.table.id, dest_column.table.id, columns=(source_column_id, dest_column_id))
foreign_key_graph.add_edge(dest_column.table.id, source_column.table.id, columns=(dest_column_id, source_column_id))
db_id = schema_dict['db_id']
assert (db_id not in schemas)
schemas[db_id] = Schema(db_id, tables, columns, foreign_key_graph, schema_dict)
eval_foreign_key_maps[db_id] = evaluation.build_foreign_key_map(schema_dict)
return (schemas, eval_foreign_key_maps)
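# load_tables consumes Spider-format tables.json entries. A minimal, hand-written
# example of the shape it expects (the keys are exactly those read above; the values
# are illustrative):
example_schema_dict = {
    'db_id': 'concert_singer',
    'table_names': ['stadium', 'singer'],
    'table_names_original': ['stadium', 'singer'],
    # column 0 is the special '*' column, attached to no table (table id -1)
    'column_names': [[-1, '*'], [0, 'stadium id'], [0, 'name'], [1, 'singer id'], [1, 'stadium id']],
    'column_names_original': [[-1, '*'], [0, 'Stadium_ID'], [0, 'Name'], [1, 'Singer_ID'], [1, 'Stadium_ID']],
    'column_types': ['text', 'number', 'text', 'number', 'number'],
    'primary_keys': [1, 3],          # column indices
    'foreign_keys': [[4, 1]],        # singer.Stadium_ID references stadium.Stadium_ID
}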
|
@registry.register('dataset', 'spider')
class SpiderDataset(torch.utils.data.Dataset):
def __init__(self, paths, tables_paths, db_path, limit=None):
self.paths = paths
self.db_path = db_path
self.examples = []
(self.schemas, self.eval_foreign_key_maps) = load_tables(tables_paths)
for path in paths:
raw_data = json.load(open(path))
for entry in raw_data:
item = SpiderItem(text=entry['question_toks'], code=entry['sql'], schema=self.schemas[entry['db_id']], orig=entry, orig_schema=self.schemas[entry['db_id']].orig)
self.examples.append(item)
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
class Metrics():
def __init__(self, dataset):
self.dataset = dataset
self.foreign_key_maps = {db_id: evaluation.build_foreign_key_map(schema.orig) for (db_id, schema) in self.dataset.schemas.items()}
self.evaluator = evaluation.Evaluator(self.dataset.db_path, self.foreign_key_maps, 'match')
self.results = []
def add(self, item, inferred_code):
self.results.append(self.evaluator.evaluate_one(item.schema.db_id, item.orig['query'], inferred_code))
def evaluate_all(self, idx, item, inferred_codes):
beams = [self.evaluator.evaluate_one(item.schema.db_id, item.orig['query'], inferred_code) for inferred_code in inferred_codes]
return (idx, beams)
def finalize(self):
self.evaluator.finalize()
return {'per_item': self.results, 'total_scores': self.evaluator.scores}
|
@registry.register('dataset', 'spider_idiom_ast')
class SpiderIdiomAstDataset(torch.utils.data.Dataset):
def __init__(self, paths, tables_paths, db_path, limit=None):
self.paths = paths
self.db_path = db_path
self.examples = []
(self.schemas, self.eval_foreign_key_maps) = load_tables(tables_paths)
for path in paths:
for line in open(path):
entry = json.loads(line)
item = SpiderItem(text=entry['orig']['question_toks'], code=entry['rewritten_ast'], schema=self.schemas[entry['orig']['db_id']], orig=entry['orig'], orig_schema=self.schemas[entry['orig']['db_id']].orig)
self.examples.append(item)
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
Metrics = SpiderDataset.Metrics
|
class HoleType(enum.Enum):
ReplaceSelf = 1
AddChild = 2
|
class MissingValue():
pass
|
@attr.s
class SeqField():
type_name = attr.ib()
field = attr.ib()
|
@registry.register('grammar', 'idiom_ast')
class IdiomAstGrammar():
def __init__(self, base_grammar, template_file, root_type=None, all_sections_rewritten=False):
self.base_grammar = registry.construct('grammar', base_grammar)
self.templates = json.load(open(template_file))
self.all_sections_rewritten = all_sections_rewritten
self.pointers = self.base_grammar.pointers
self.ast_wrapper = copy.deepcopy(self.base_grammar.ast_wrapper)
self.base_ast_wrapper = self.base_grammar.ast_wrapper
self.root_type = self.base_grammar.root_type
if (base_grammar['name'] == 'python'):
self.root_type = 'mod'
singular_types_with_single_seq_field = set((name for (name, type_info) in self.ast_wrapper.singular_types.items() if ((len(type_info.fields) == 1) and type_info.fields[0].seq)))
seq_fields = {'{}-{}'.format(name, field.name): SeqField(name, field) for (name, type_info) in self.ast_wrapper.singular_types.items() for field in type_info.fields if field.seq}
templates_by_head_type = collections.defaultdict(list)
for template in self.templates:
head_type = template['idiom'][0]
if (head_type in singular_types_with_single_seq_field):
field = self.ast_wrapper.singular_types[head_type].fields[0]
templates_by_head_type[field.type].append((template, SeqField(head_type, field)))
templates_by_head_type[head_type].append((template, None))
elif (head_type in seq_fields):
seq_field = seq_fields[head_type]
templates_by_head_type[seq_field.field.type].append((template, seq_field))
else:
templates_by_head_type[head_type].append((template, None))
types_to_replace = {}
for (head_type, templates) in templates_by_head_type.items():
(constructors, seq_fragment_constructors) = ([], [])
for (template, seq_field) in templates:
if seq_field:
if (head_type in self.ast_wrapper.product_types):
seq_type = '{}_plus_templates'.format(head_type)
else:
seq_type = head_type
seq_fragment_constructors.append(self._template_to_constructor(template, '_{}_seq'.format(seq_type), seq_field))
else:
constructors.append(self._template_to_constructor(template, '', seq_field))
if (head_type in self.ast_wrapper.constructors):
assert constructors
assert (not seq_fragment_constructors)
self.ast_wrapper.add_constructors_to_sum_type(self.ast_wrapper.constructor_to_sum_type[head_type], constructors)
elif (head_type in self.ast_wrapper.sum_types):
assert (not constructors)
assert seq_fragment_constructors
self.ast_wrapper.add_seq_fragment_type(head_type, seq_fragment_constructors)
elif (head_type in self.ast_wrapper.product_types):
orig_prod_type = self.ast_wrapper.product_types[head_type]
new_constructor_for_prod_type = asdl.Constructor(name=head_type, fields=orig_prod_type.fields)
self.ast_wrapper.remove_product_type(head_type)
name = '{}_plus_templates'.format(head_type)
self.ast_wrapper.add_sum_type(name, asdl.Sum(types=(constructors + [new_constructor_for_prod_type])))
self.ast_wrapper.add_seq_fragment_type(name, seq_fragment_constructors)
types_to_replace[head_type] = name
elif (head_type in self.ast_wrapper.primitive_types):
raise NotImplementedError('built-in type as head type of idiom unsupported: {}'.format(head_type))
else:
raise NotImplementedError('Unable to handle head type of idiom: {}'.format(head_type))
for constructor_or_product in self.ast_wrapper.singular_types.values():
for field in constructor_or_product.fields:
if (field.type in types_to_replace):
field.type = types_to_replace[field.type]
self.templates_containing_placeholders = {}
for (name, constructor) in self.ast_wrapper.singular_types.items():
if (not hasattr(constructor, 'template')):
continue
hole_values = {}
for field in constructor.fields:
hole_id = self.get_hole_id(field.name)
placeholder = ast_util.HoleValuePlaceholder(id=hole_id, field_name=field.name, type=field.type, is_seq=field.seq, is_opt=field.opt)
if field.seq:
hole_values[hole_id] = [placeholder]
else:
hole_values[hole_id] = placeholder
self.templates_containing_placeholders[name] = constructor.template(hole_values)
if (root_type is not None):
if isinstance(root_type, (list, tuple)):
for choice in root_type:
if ((choice in self.ast_wrapper.singular_types) or (choice in self.ast_wrapper.sum_types)):
self.root_type = choice
break
else:
self.root_type = root_type
def parse(self, code, section):
if (self.all_sections_rewritten or (section == 'train')):
return self.convert_idiom_ast(code, template_id=None)()
else:
return self.base_grammar.parse(code, section)
def unparse(self, tree, item):
expanded_tree = self._expand_templates(tree)
self.base_ast_wrapper.verify_ast(expanded_tree)
return self.base_grammar.unparse(expanded_tree, item)
def tokenize_field_value(self, field_value):
return self.base_grammar.tokenize_field_value(field_value)
@classmethod
def get_hole_id(cls, field):
m = re.match('^hole(\\d+)$', field)
if (not m):
raise ValueError('Unexpected field name: {}'.format(field))
return int(m.group(1))
def _expand_templates(self, tree):
if (not isinstance(tree, dict)):
return tree
node_type = tree['_type']
constructor = self.ast_wrapper.constructors.get(node_type)
expanded_fields = {}
for (field, value) in tree.items():
if (field == '_type'):
continue
if isinstance(value, (list, tuple)):
result = []
for item in value:
converted = self._expand_templates(item)
if (isinstance(item, dict) and re.match('^Template\\d+_.*_seq$', item['_type'])):
item_type_info = self.ast_wrapper.constructors[converted['_type']]
assert (len(item_type_info.fields) == 1)
assert item_type_info.fields[0].seq
result += converted.get(item_type_info.fields[0].name, [])
else:
result.append(converted)
expanded_fields[field] = result
else:
expanded_fields[field] = self._expand_templates(value)
if ((constructor is None) or (not hasattr(constructor, 'template'))):
return {'_type': node_type, **expanded_fields}
template = constructor.template
hole_values = {}
for (field, expanded_value) in expanded_fields.items():
hole_id = self.get_hole_id(field)
hole_values[hole_id] = expanded_value
return template(hole_values)
def _template_to_constructor(self, template_dict, suffix, seq_field):
hole_node_types = {}
stack = [(None, template_dict['idiom'], None)]
while stack:
(parent, node, child_index) = stack.pop()
(node_type, ref_symbols, hole_id, children) = node
if (hole_id is not None):
assert (hole_id not in hole_node_types)
hyphenated_node_type = None
unhyphenated_node_type = None
hole_type_str = template_dict['holes'][hole_id]['type']
if (hole_type_str == 'AddChild'):
node_type_for_field_type = node_type
elif (hole_type_str == 'ReplaceSelf'):
if ('-' in node_type):
node_type_for_field_type = node_type
else:
node_type_for_field_type = parent[0]
field_info = self._get_field_info_from_name(node_type_for_field_type)
if (field_info.seq and (hole_type_str == 'ReplaceSelf') and ('-' not in node_type)):
assert (child_index in (0, 1))
seq = (child_index == 1)
else:
seq = field_info.seq
hole_node_types[hole_id] = (field_info.type, seq, field_info.opt)
stack += [(node, child, i) for (i, child) in enumerate(children)]
fields = []
for hole in template_dict['holes']:
i = hole['id']
(field_type, seq, opt) = hole_node_types[i]
field = asdl.Field(type=field_type, name='hole{}'.format(i), seq=seq, opt=opt)
field.hole_type = HoleType[hole['type']]
fields.append(field)
constructor = asdl.Constructor('Template{}{}'.format(template_dict['id'], suffix), fields)
constructor.template = self.convert_idiom_ast(template_dict['idiom'], template_id=template_dict['id'], seq_field=seq_field)
return constructor
def _get_field_info_from_name(self, node_type):
if ('-' in node_type):
(type_name, field_name) = node_type.split('-')
type_info = self.ast_wrapper.singular_types[type_name]
(field_info,) = [field for field in type_info.fields if (field.name == field_name)]
else:
type_info = self.ast_wrapper.singular_types[node_type]
assert (len(type_info.fields) == 1)
field_info = type_info.fields[0]
return field_info
@classmethod
def _node_type(cls, node):
if isinstance(node[0], dict):
if ('nt' in node[0]):
return node[0]['nt']
elif ('template_id' in node[0]):
return 'Template{}'.format(node[0]['template_id'])
else:
return node[0]
def convert_idiom_ast(self, idiom_ast, template_id=None, seq_fragment_type=None, seq_field=None):
if (template_id is not None):
(node_type, ref_symbols, hole, children) = idiom_ast
else:
(node_type, ref_symbols, children) = idiom_ast
is_template_node = False
extra_types = []
if isinstance(node_type, dict):
if seq_fragment_type:
suffix = '_{}_seq'.format(seq_fragment_type)
else:
suffix = ''
if ('template_id' in node_type):
node_type = 'Template{}{}'.format(node_type['template_id'], suffix)
is_template_node = True
elif (('nt' in node_type) and ('mt' in node_type)):
extra_types = ['Template{}{}'.format(i, suffix) for i in node_type['mt']]
node_type = node_type['nt']
if (seq_field is None):
field_infos = self.ast_wrapper.singular_types[node_type].fields
else:
field_infos = [seq_field.field]
children_to_convert = []
if is_template_node:
assert (len(children) == len(field_infos))
for (field, child) in zip(field_infos, children):
if ((field.hole_type == HoleType.ReplaceSelf) and field.seq):
children_to_convert.append((field, child))
else:
assert (not field.seq)
dummy_node = list(idiom_ast)
dummy_node[0] = '{}-{}'.format(node_type, field.name)
dummy_node[(- 1)] = [child]
children_to_convert.append((field, dummy_node))
else:
fields_by_name = {f.name: f for f in field_infos}
if (len(field_infos) == 0):
pass
elif (len(field_infos) == 1):
children_to_convert.append((field_infos[0], idiom_ast))
else:
prefix_len = (len(node_type) + 1)
for child in children:
field_name = self._node_type(child)[prefix_len:]
children_to_convert.append((fields_by_name[field_name], child))
assert (set((field.name for (field, _) in children_to_convert)) == set((field.name for field in field_infos)))
def result_creator(hole_values={}):
if ((template_id is not None) and (hole is not None) and (self.templates[template_id]['holes'][hole]['type'] == 'ReplaceSelf')):
return hole_values.get(hole, MissingValue)
result = {}
for (field, child_node) in children_to_convert:
if (field.type in self.ast_wrapper.primitive_types):
convert = (lambda node: (lambda hole_values: self.convert_builtin_type(field.type, self._node_type(node))))
else:
convert = functools.partial(self.convert_idiom_ast, template_id=template_id)
if field.seq:
value = []
while True:
if ((template_id is not None) and (child_node[2] is not None)):
hole_value = hole_values.get(child_node[2], [])
assert isinstance(hole_value, list)
value += hole_value
assert (len(child_node[(- 1)]) == 0)
break
(child_type, child_children) = (child_node[0], child_node[(- 1)])
if (isinstance(child_type, dict) and ('template_id' in child_type)):
value.append(convert(child_node, seq_fragment_type=(field.type if field.seq else None))(hole_values))
break
if (len(child_children) == 1):
assert (self._node_type(child_children[0]) == 'End')
break
elif (len(child_children) == 2):
value.append(convert(child_children[0])(hole_values))
child_node = child_children[1]
else:
raise ValueError('Unexpected number of children: {}'.format(len(child_children)))
present = bool(value)
elif field.opt:
if ((template_id is not None) and (child_node[2] is not None)):
assert (len(child_node[(- 1)]) == 0)
present = (child_node[2] in hole_values)
value = hole_values.get(child_node[2])
else:
assert (len(child_node[(- 1)]) == 1)
if (self._node_type(child_node[(- 1)][0]) == 'Null'):
value = None
present = False
else:
value = convert(child_node[(- 1)][0])(hole_values)
present = (value is not MissingValue)
elif ((template_id is not None) and (child_node[2] is not None)):
assert (len(child_node[(- 1)]) == 0)
value = hole_values[child_node[2]]
present = True
else:
assert (len(child_node[(- 1)]) == 1)
value = convert(child_node[(- 1)][0])(hole_values)
present = True
if present:
result[field.name] = value
result['_type'] = node_type
result['_extra_types'] = extra_types
return result
return result_creator
def convert_builtin_type(self, field_type, value):
if ((field_type == 'singleton') and (value == 'Null')):
return None
return value
|
def split_string_whitespace_and_camelcase(s):
split_space = s.split(' ')
result = []
for token in split_space:
if token:
camelcase_split_token = re.sub('([a-z])([A-Z])', '\\1\ue012\\2', token).split('\ue012')
result.extend(camelcase_split_token)
result.append(' ')
return result[:(- 1)]
|
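# Illustrative usage of the tokenizer above (a minimal sketch; expected outputs
# are inferred from the implementation): camelCase boundaries are split and
# single spaces are kept as standalone tokens, with no trailing space token.
def _example_split_string_whitespace_and_camelcase():
    assert split_string_whitespace_and_camelcase('fooBar baz') == ['foo', 'Bar', ' ', 'baz']
    assert split_string_whitespace_and_camelcase('SELECT name') == ['SELECT', ' ', 'name']
|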
@registry.register('grammar', 'python')
class PythonGrammar():
ast_wrapper = ast_util.ASTWrapper(asdl.parse(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Python.asdl')))
root_type = 'Module'
pointers = set()
@classmethod
def parse(cls, code, section):
try:
py_ast = ast.parse(code)
return cls.from_native_ast(py_ast)
except SyntaxError:
return None
@classmethod
def unparse(cls, tree, item):
ast_tree = cls.to_native_ast(tree)
return astor.to_source(ast_tree)
@classmethod
def tokenize_field_value(cls, field_value):
if isinstance(field_value, bytes):
            field_value = field_value.decode('latin1')
else:
field_value = str(field_value)
return split_string_whitespace_and_camelcase(field_value)
@classmethod
def from_native_ast(cls, node):
if (not isinstance(node, ast.AST)):
return node
node_type = node.__class__.__name__
field_infos = {f.name: f for f in cls.ast_wrapper.singular_types[node_type].fields}
result = {'_type': node_type}
for (field, value) in ast.iter_fields(node):
if (field in PYTHON_AST_FIELD_BLACKLIST.get(node_type, set())):
continue
field_info = field_infos[field]
if (field_info.opt and (value is None)):
continue
if isinstance(value, (list, tuple)):
assert field_info.seq
if value:
result[field] = [cls.from_native_ast(v) for v in value]
else:
result[field] = cls.from_native_ast(value)
return result
@classmethod
def to_native_ast(cls, node):
if isinstance(node, (list, tuple)):
return [cls.to_native_ast(item) for item in node]
elif (not isinstance(node, dict)):
return node
result = getattr(ast, node['_type'])()
type_info = cls.ast_wrapper.singular_types[node['_type']]
for field_info in type_info.fields:
if field_info.seq:
value = node.get(field_info.name, [])
elif field_info.opt:
value = node.get(field_info.name, None)
else:
value = node[field_info.name]
setattr(result, field_info.name, cls.to_native_ast(value))
return result
|
def bimap(first, second):
return ({f: s for (f, s) in zip(first, second)}, {s: f for (f, s) in zip(first, second)})
|
def filter_nones(d):
return {k: v for (k, v) in d.items() if ((v is not None) and (v != []))}
|
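# Illustrative behaviour of the two helpers above (a sketch): bimap builds paired
# forward/backward lookup dicts, and filter_nones drops None-valued and
# empty-list entries from a dict.
def _example_bimap_and_filter_nones():
    forward, backward = bimap(('asc', 'desc'), ('Asc', 'Desc'))
    assert forward == {'asc': 'Asc', 'desc': 'Desc'}
    assert backward == {'Asc': 'asc', 'Desc': 'desc'}
    assert filter_nones({'a': 1, 'b': None, 'c': []}) == {'a': 1}
|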
def join(iterable, delimiter):
it = iter(iterable)
(yield next(it))
for x in it:
(yield delimiter)
(yield x)
|
def intersperse(delimiter, seq):
return itertools.islice(itertools.chain.from_iterable(zip(itertools.repeat(delimiter), seq)), 1, None)
|
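# Both helpers above interleave a delimiter between consecutive elements: join is
# a hand-written generator (it assumes a non-empty iterable, since it yields
# next(it) unconditionally), while intersperse builds the same sequence from
# itertools primitives and also handles the empty case.
def _example_join_and_intersperse():
    assert list(join(['a', 'b', 'c'], '/')) == ['a', '/', 'b', '/', 'c']
    assert list(intersperse('/', ['a', 'b', 'c'])) == ['a', '/', 'b', '/', 'c']
    assert list(intersperse('/', [])) == []
|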
@registry.register('grammar', 'spider')
class SpiderLanguage():
root_type = 'sql'
def __init__(self, output_from=False, use_table_pointer=False, include_literals=True, include_columns=True):
custom_primitive_type_checkers = {}
self.pointers = set()
if use_table_pointer:
custom_primitive_type_checkers['table'] = ast_util.FilterType(int)
self.pointers.add('table')
if include_columns:
custom_primitive_type_checkers['column'] = ast_util.FilterType(int)
self.pointers.add('column')
self.ast_wrapper = ast_util.ASTWrapper(asdl.parse(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Spider.asdl')), custom_primitive_type_checkers=custom_primitive_type_checkers)
self.output_from = output_from
self.include_literals = include_literals
self.include_columns = include_columns
if (not self.output_from):
sql_fields = self.ast_wrapper.product_types['sql'].fields
assert (sql_fields[1].name == 'from')
del sql_fields[1]
if (not use_table_pointer):
self.ast_wrapper.singular_types['Table'].fields[0].type = 'int'
if (not include_literals):
sql_fields = self.ast_wrapper.singular_types['sql'].fields
for field in sql_fields:
if (field.name == 'limit'):
field.opt = False
field.type = 'singleton'
if (not include_columns):
col_unit_fields = self.ast_wrapper.singular_types['col_unit'].fields
assert (col_unit_fields[1].name == 'col_id')
del col_unit_fields[1]
def parse(self, code, section):
return self.parse_sql(code)
def unparse(self, tree, item):
unparser = SpiderUnparser(self.ast_wrapper, item.schema)
return unparser.unparse_sql(tree)
@classmethod
def tokenize_field_value(cls, field_value):
if isinstance(field_value, bytes):
            field_value_str = field_value.decode('latin1')
elif isinstance(field_value, str):
field_value_str = field_value
else:
field_value_str = str(field_value)
if ((field_value_str[0] == '"') and (field_value_str[(- 1)] == '"')):
field_value_str = field_value_str[1:(- 1)]
return [field_value_str]
def parse_val(self, val):
if isinstance(val, str):
if (not self.include_literals):
return {'_type': 'Terminal'}
return {'_type': 'String', 's': val}
elif isinstance(val, list):
return {'_type': 'ColUnit', 'c': self.parse_col_unit(val)}
elif isinstance(val, float):
if (not self.include_literals):
return {'_type': 'Terminal'}
return {'_type': 'Number', 'f': val}
elif isinstance(val, dict):
return {'_type': 'ValSql', 's': self.parse_sql(val)}
else:
raise ValueError(val)
def parse_col_unit(self, col_unit):
(agg_id, col_id, is_distinct) = col_unit
result = {'_type': 'col_unit', 'agg_id': {'_type': self.AGG_TYPES_F[agg_id]}, 'is_distinct': is_distinct}
if self.include_columns:
result['col_id'] = col_id
return result
def parse_val_unit(self, val_unit):
(unit_op, col_unit1, col_unit2) = val_unit
result = {'_type': self.UNIT_TYPES_F[unit_op], 'col_unit1': self.parse_col_unit(col_unit1)}
if (unit_op != 0):
result['col_unit2'] = self.parse_col_unit(col_unit2)
return result
def parse_table_unit(self, table_unit):
(table_type, value) = table_unit
if (table_type == 'sql'):
return {'_type': 'TableUnitSql', 's': self.parse_sql(value)}
elif (table_type == 'table_unit'):
return {'_type': 'Table', 'table_id': value}
else:
raise ValueError(table_type)
def parse_cond(self, cond, optional=False):
if (optional and (not cond)):
return None
if (len(cond) > 1):
return {'_type': self.LOGIC_OPERATORS_F[cond[1]], 'left': self.parse_cond(cond[:1]), 'right': self.parse_cond(cond[2:])}
((not_op, op_id, val_unit, val1, val2),) = cond
result = {'_type': self.COND_TYPES_F[op_id], 'val_unit': self.parse_val_unit(val_unit), 'val1': self.parse_val(val1)}
if (op_id == 1):
result['val2'] = self.parse_val(val2)
if not_op:
result = {'_type': 'Not', 'c': result}
return result
def parse_sql(self, sql, optional=False):
if (optional and (sql is None)):
return None
return filter_nones({'_type': 'sql', 'select': self.parse_select(sql['select']), 'where': self.parse_cond(sql['where'], optional=True), 'group_by': [self.parse_col_unit(u) for u in sql['groupBy']], 'order_by': self.parse_order_by(sql['orderBy']), 'having': self.parse_cond(sql['having'], optional=True), 'limit': (sql['limit'] if self.include_literals else (sql['limit'] is not None)), 'intersect': self.parse_sql(sql['intersect'], optional=True), 'except': self.parse_sql(sql['except'], optional=True), 'union': self.parse_sql(sql['union'], optional=True), **({'from': self.parse_from(sql['from'])} if self.output_from else {})})
def parse_select(self, select):
(is_distinct, aggs) = select
return {'_type': 'select', 'is_distinct': is_distinct, 'aggs': [self.parse_agg(agg) for agg in aggs]}
def parse_agg(self, agg):
(agg_id, val_unit) = agg
return {'_type': 'agg', 'agg_id': {'_type': self.AGG_TYPES_F[agg_id]}, 'val_unit': self.parse_val_unit(val_unit)}
def parse_from(self, from_):
return filter_nones({'_type': 'from', 'table_units': [self.parse_table_unit(u) for u in from_['table_units']], 'conds': self.parse_cond(from_['conds'], optional=True)})
def parse_order_by(self, order_by):
if (not order_by):
return None
(order, val_units) = order_by
return {'_type': 'order_by', 'order': {'_type': self.ORDERS_F[order]}, 'val_units': [self.parse_val_unit(v) for v in val_units]}
(COND_TYPES_F, COND_TYPES_B) = bimap(range(1, 10), ('Between', 'Eq', 'Gt', 'Lt', 'Ge', 'Le', 'Ne', 'In', 'Like'))
(UNIT_TYPES_F, UNIT_TYPES_B) = bimap(range(5), ('Column', 'Minus', 'Plus', 'Times', 'Divide'))
(AGG_TYPES_F, AGG_TYPES_B) = bimap(range(6), ('NoneAggOp', 'Max', 'Min', 'Count', 'Sum', 'Avg'))
(ORDERS_F, ORDERS_B) = bimap(('asc', 'desc'), ('Asc', 'Desc'))
(LOGIC_OPERATORS_F, LOGIC_OPERATORS_B) = bimap(('and', 'or'), ('And', 'Or'))
|
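# Quick sanity checks of the SpiderLanguage lookup tables above (a sketch; the
# integer indices follow the Spider dataset's encoding of condition, unit and
# aggregation operators, e.g. a col_unit triple is (agg_id, col_id, is_distinct)).
def _example_spider_language_tables():
    assert SpiderLanguage.AGG_TYPES_F[3] == 'Count'
    assert SpiderLanguage.AGG_TYPES_B['Count'] == 3
    assert SpiderLanguage.COND_TYPES_F[8] == 'In'
    assert SpiderLanguage.UNIT_TYPES_F[0] == 'Column'
    assert SpiderLanguage.ORDERS_F['asc'] == 'Asc'
|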
@attr.s
class SpiderUnparser():
ast_wrapper = attr.ib()
schema = attr.ib()
UNIT_TYPES_B = {'Minus': '-', 'Plus': '+', 'Times': '*', 'Divide': '/'}
COND_TYPES_B = {'Between': 'BETWEEN', 'Eq': '=', 'Gt': '>', 'Lt': '<', 'Ge': '>=', 'Le': '<=', 'Ne': '!=', 'In': 'IN', 'Like': 'LIKE'}
@classmethod
def conjoin_conds(cls, conds):
if (not conds):
return None
if (len(conds) == 1):
return conds[0]
return {'_type': 'And', 'left': conds[0], 'right': cls.conjoin_conds(conds[1:])}
@classmethod
def linearize_cond(cls, cond):
if (cond['_type'] in ('And', 'Or')):
(conds, keywords) = cls.linearize_cond(cond['right'])
return (([cond['left']] + conds), ([cond['_type']] + keywords))
else:
return ([cond], [])
def unparse_val(self, val):
if (val['_type'] == 'Terminal'):
return "'terminal'"
if (val['_type'] == 'String'):
return val['s']
if (val['_type'] == 'ColUnit'):
return self.unparse_col_unit(val['c'])
if (val['_type'] == 'Number'):
return str(val['f'])
if (val['_type'] == 'ValSql'):
return '({})'.format(self.unparse_sql(val['s']))
def unparse_col_unit(self, col_unit):
if ('col_id' in col_unit):
column = self.schema.columns[col_unit['col_id']]
if (column.table is None):
column_name = column.orig_name
else:
column_name = '{}.{}'.format(column.table.orig_name, column.orig_name)
else:
column_name = 'some_col'
if col_unit['is_distinct']:
column_name = 'DISTINCT {}'.format(column_name)
agg_type = col_unit['agg_id']['_type']
if (agg_type == 'NoneAggOp'):
return column_name
else:
return '{}({})'.format(agg_type, column_name)
def unparse_val_unit(self, val_unit):
if (val_unit['_type'] == 'Column'):
return self.unparse_col_unit(val_unit['col_unit1'])
col1 = self.unparse_col_unit(val_unit['col_unit1'])
col2 = self.unparse_col_unit(val_unit['col_unit2'])
return '{} {} {}'.format(col1, self.UNIT_TYPES_B[val_unit['_type']], col2)
def unparse_cond(self, cond, negated=False):
if (cond['_type'] == 'And'):
assert (not negated)
return '{} AND {}'.format(self.unparse_cond(cond['left']), self.unparse_cond(cond['right']))
elif (cond['_type'] == 'Or'):
assert (not negated)
return '{} OR {}'.format(self.unparse_cond(cond['left']), self.unparse_cond(cond['right']))
elif (cond['_type'] == 'Not'):
return self.unparse_cond(cond['c'], negated=True)
elif (cond['_type'] == 'Between'):
tokens = [self.unparse_val_unit(cond['val_unit'])]
if negated:
tokens.append('NOT')
tokens += ['BETWEEN', self.unparse_val(cond['val1']), 'AND', self.unparse_val(cond['val2'])]
return ' '.join(tokens)
tokens = [self.unparse_val_unit(cond['val_unit'])]
if negated:
tokens.append('NOT')
tokens += [self.COND_TYPES_B[cond['_type']], self.unparse_val(cond['val1'])]
return ' '.join(tokens)
def unparse_sql(self, tree):
if ('from' not in tree):
tree = dict(tree)
candidate_column_ids = set(self.ast_wrapper.find_all_descendants_of_type(tree, 'column', (lambda field: (field.type != 'sql'))))
candidate_columns = [self.schema.columns[i] for i in candidate_column_ids]
all_from_table_ids = set((column.table.id for column in candidate_columns if (column.table is not None)))
if (not all_from_table_ids):
all_from_table_ids = {0}
covered_tables = set()
candidate_table_ids = sorted(all_from_table_ids)
start_table_id = candidate_table_ids[0]
conds = []
for table_id in candidate_table_ids[1:]:
if (table_id in covered_tables):
continue
try:
path = nx.shortest_path(self.schema.foreign_key_graph, source=start_table_id, target=table_id)
except (nx.NetworkXNoPath, nx.NodeNotFound):
covered_tables.add(table_id)
continue
for (source_table_id, target_table_id) in zip(path, path[1:]):
if (target_table_id in covered_tables):
continue
all_from_table_ids.add(target_table_id)
(col1, col2) = self.schema.foreign_key_graph[source_table_id][target_table_id]['columns']
conds.append({'_type': 'Eq', 'val_unit': {'_type': 'Column', 'col_unit1': {'_type': 'col_unit', 'agg_id': {'_type': 'NoneAggOp'}, 'col_id': col1, 'is_distinct': False}}, 'val1': {'_type': 'ColUnit', 'c': {'_type': 'col_unit', 'agg_id': {'_type': 'NoneAggOp'}, 'col_id': col2, 'is_distinct': False}}})
table_units = [{'_type': 'Table', 'table_id': i} for i in sorted(all_from_table_ids)]
tree['from'] = {'_type': 'from', 'table_units': table_units}
cond_node = self.conjoin_conds(conds)
if (cond_node is not None):
tree['from']['conds'] = cond_node
result = [self.unparse_select(tree['select']), self.unparse_from(tree['from'])]
if ('where' in tree):
result += ['WHERE', self.unparse_cond(tree['where'])]
if ('group_by' in tree):
result += ['GROUP BY', ', '.join((self.unparse_col_unit(c) for c in tree['group_by']))]
if ('having' in tree):
result += ['HAVING', self.unparse_cond(tree['having'])]
if ('order_by' in tree):
result.append(self.unparse_order_by(tree['order_by']))
if ('limit' in tree):
if isinstance(tree['limit'], bool):
if tree['limit']:
result += ['LIMIT', '1']
else:
result += ['LIMIT', str(tree['limit'])]
if ('intersect' in tree):
result += ['INTERSECT', self.unparse_sql(tree['intersect'])]
if ('except' in tree):
result += ['EXCEPT', self.unparse_sql(tree['except'])]
if ('union' in tree):
result += ['UNION', self.unparse_sql(tree['union'])]
return ' '.join(result)
def unparse_select(self, select):
tokens = ['SELECT']
if select['is_distinct']:
tokens.append('DISTINCT')
tokens.append(', '.join((self.unparse_agg(agg) for agg in select.get('aggs', []))))
return ' '.join(tokens)
def unparse_agg(self, agg):
unparsed_val_unit = self.unparse_val_unit(agg['val_unit'])
agg_type = agg['agg_id']['_type']
if (agg_type == 'NoneAggOp'):
return unparsed_val_unit
else:
return '{}({})'.format(agg_type, unparsed_val_unit)
def unparse_from(self, from_):
if ('conds' in from_):
(all_conds, keywords) = self.linearize_cond(from_['conds'])
else:
(all_conds, keywords) = ([], [])
assert all(((keyword == 'And') for keyword in keywords))
cond_indices_by_table = collections.defaultdict(set)
tables_involved_by_cond_idx = collections.defaultdict(set)
for (i, cond) in enumerate(all_conds):
for column in self.ast_wrapper.find_all_descendants_of_type(cond, 'column'):
table = self.schema.columns[column].table
if (table is None):
continue
cond_indices_by_table[table.id].add(i)
tables_involved_by_cond_idx[i].add(table.id)
output_table_ids = set()
output_cond_indices = set()
tokens = ['FROM']
for (i, table_unit) in enumerate(from_.get('table_units', [])):
if (i > 0):
tokens += ['JOIN']
if (table_unit['_type'] == 'TableUnitSql'):
tokens.append('({})'.format(self.unparse_sql(table_unit['s'])))
elif (table_unit['_type'] == 'Table'):
table_id = table_unit['table_id']
tokens += [self.schema.tables[table_id].orig_name]
output_table_ids.add(table_id)
conds_to_output = []
for cond_idx in sorted(cond_indices_by_table[table_id]):
if (cond_idx in output_cond_indices):
continue
if (tables_involved_by_cond_idx[cond_idx] <= output_table_ids):
conds_to_output.append(all_conds[cond_idx])
output_cond_indices.add(cond_idx)
if conds_to_output:
tokens += ['ON']
tokens += list(intersperse('AND', (self.unparse_cond(cond) for cond in conds_to_output)))
return ' '.join(tokens)
def unparse_order_by(self, order_by):
return 'ORDER BY {} {}'.format(', '.join((self.unparse_val_unit(v) for v in order_by['val_units'])), order_by['order']['_type'])
|
class AbstractPreproc(metaclass=abc.ABCMeta):
"Used for preprocessing data according to the model's liking.\n\n Some tasks normally performed here:\n - Constructing a vocabulary from the training data\n - Transforming the items in some way, such as\n - Parsing the AST\n - \n - Loading and providing the pre-processed data to the model\n\n TODO:\n - Allow transforming items in a streaming fashion without loading all of them into memory first\n "
@abc.abstractmethod
def validate_item(self, item, section):
'Checks whether item can be successfully preprocessed.\n \n Returns a boolean and an arbitrary object.'
pass
@abc.abstractmethod
def add_item(self, item, section, validation_info):
'Add an item to be preprocessed.'
pass
@abc.abstractmethod
def clear_items(self):
'Clear the preprocessed items'
pass
@abc.abstractmethod
def save(self):
'Marks that all of the items have been preprocessed. Save state to disk.\n\n Used in preprocess.py, after reading all of the data.'
pass
@abc.abstractmethod
def load(self):
'Load state from disk.'
pass
@abc.abstractmethod
def dataset(self, section):
        'Returns a torch.utils.data.Dataset instance.'
pass
|
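# A minimal in-memory sketch of the AbstractPreproc contract above (illustrative
# only): real implementations persist their state to disk in save()/load() and
# typically return a torch.utils.data.Dataset from dataset().
class _ExampleListPreproc(AbstractPreproc):
    def __init__(self):
        self.items = {'train': [], 'val': [], 'test': []}
    def validate_item(self, item, section):
        # Accept everything; the second element can carry parse results forward.
        return (True, None)
    def add_item(self, item, section, validation_info):
        self.items[section].append(item)
    def clear_items(self):
        self.items = {'train': [], 'val': [], 'test': []}
    def save(self):
        pass  # a real preproc would write self.items and any vocab to disk here
    def load(self):
        pass  # a real preproc would read the saved state back from disk here
    def dataset(self, section):
        return self.items[section]
|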
def maybe_mask(attn, attn_mask):
if (attn_mask is not None):
assert all((((a == 1) or (b == 1) or (a == b)) for (a, b) in zip(attn.shape[::(- 1)], attn_mask.shape[::(- 1)]))), 'Attention mask shape {} should be broadcastable with attention shape {}'.format(attn_mask.shape, attn.shape)
attn.data.masked_fill_(attn_mask, (- float('inf')))
|
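# Small illustration of maybe_mask above (a sketch): positions where the mask is
# True are filled in place with -inf so that a subsequent softmax assigns them
# zero probability; the mask only needs to be broadcastable to the logits.
def _example_maybe_mask():
    import torch
    attn = torch.zeros(2, 3, 4)
    attn_mask = torch.zeros(2, 1, 4, dtype=torch.bool)
    attn_mask[:, :, -1] = True  # mask out the last position for every query
    maybe_mask(attn, attn_mask)
    assert torch.isinf(attn[:, :, -1]).all()
|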
class Attention(torch.nn.Module):
def __init__(self, pointer):
super().__init__()
self.pointer = pointer
self.softmax = torch.nn.Softmax(dim=(- 1))
def forward(self, query, values, attn_mask=None):
attn_logits = self.pointer(query, values, attn_mask)
attn = self.softmax(attn_logits)
output = torch.bmm(attn.unsqueeze(1), values)
output = output.squeeze(1)
return (output, attn)
|
@registry.register('pointer', 'sdp')
class ScaledDotProductPointer(torch.nn.Module):
def __init__(self, query_size, key_size):
super().__init__()
self.query_proj = torch.nn.Linear(query_size, key_size)
self.temp = np.power(key_size, 0.5)
def forward(self, query, keys, attn_mask=None):
proj_query = self.query_proj(query).unsqueeze(2)
attn_logits = (torch.bmm(keys, proj_query).squeeze(2) / self.temp)
maybe_mask(attn_logits, attn_mask)
return attn_logits
|
@registry.register('attention', 'sdp')
class ScaledDotProductAttention(Attention):
def __init__(self, query_size, value_size):
super().__init__(ScaledDotProductPointer(query_size, value_size))
|
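# Shape walkthrough for the pointer/attention pair above (a sketch with random
# inputs): a single query vector per batch element attends over a set of memory
# entries and returns both the pooled output and the attention weights.
def _example_scaled_dot_product_attention():
    import torch
    attn = ScaledDotProductAttention(query_size=16, value_size=32)
    query = torch.randn(4, 16)      # one query vector per batch element
    values = torch.randn(4, 7, 32)  # 7 candidate memory entries per batch element
    output, weights = attn(query, values)
    assert output.shape == (4, 32)
    assert weights.shape == (4, 7)
|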
@registry.register('pointer', 'bahdanau')
class BahdanauPointer(torch.nn.Module):
def __init__(self, query_size, key_size, proj_size):
super().__init__()
self.compute_scores = torch.nn.Sequential(torch.nn.Linear((query_size + key_size), proj_size), torch.nn.Tanh(), torch.nn.Linear(proj_size, 1))
def forward(self, query: torch.Tensor, keys: torch.Tensor, attn_mask=None):
query_expanded = query.unsqueeze(1).expand((- 1), keys.shape[1], (- 1))
attn_logits = self.compute_scores(torch.cat((query_expanded, keys), dim=2))
attn_logits = attn_logits.squeeze(2)
maybe_mask(attn_logits, attn_mask)
return attn_logits
|
@registry.register('attention', 'bahdanau')
class BahdanauAttention(Attention):
def __init__(self, query_size, value_size, proj_size):
super().__init__(BahdanauPointer(query_size, value_size, proj_size))
|
class MultiHeadedAttention(torch.nn.Module):
def __init__(self, h, query_size, value_size, dropout=0.1):
super().__init__()
assert ((query_size % h) == 0)
assert ((value_size % h) == 0)
self.d_k = (value_size // h)
self.h = h
self.linears = torch.nn.ModuleList([torch.nn.Linear(query_size, value_size), torch.nn.Linear(value_size, value_size), torch.nn.Linear(value_size, value_size), torch.nn.Linear(value_size, value_size)])
self.attn = None
self.dropout = torch.nn.Dropout(p=dropout)
def forward(self, query, values, attn_mask=None):
'Implements Figure 2'
if (attn_mask is not None):
attn_mask = attn_mask.unsqueeze(1)
nbatches = query.size(0)
(query, keys, values) = [l(x).view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2) for (l, x) in zip(self.linears, (query, values, values))]
(x, self.attn) = transformer.attention(query, keys, values, mask=attn_mask, dropout=self.dropout)
x = x.transpose(1, 2).contiguous().view(nbatches, (- 1), (self.h * self.d_k))
x = x.squeeze(1)
return (self.linears[(- 1)](x), self.attn)
|
class ZippedDataset(torch.utils.data.Dataset):
def __init__(self, *components):
assert (len(components) >= 1)
lengths = [len(c) for c in components]
assert all(((lengths[0] == other) for other in lengths[1:])), "Lengths don't match: {}".format(lengths)
self.components = components
def __getitem__(self, idx):
return tuple((c[idx] for c in self.components))
def __len__(self):
return len(self.components[0])
|
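# ZippedDataset above simply pairs up equally sized datasets index-by-index,
# which is how encoder and decoder preprocessed items are served together.
def _example_zipped_dataset():
    ds = ZippedDataset(['a', 'b', 'c'], [1, 2, 3])
    assert len(ds) == 3
    assert ds[1] == ('b', 2)
|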
@registry.register('model', 'EncDec')
class EncDecModel(torch.nn.Module):
class Preproc(abstract_preproc.AbstractPreproc):
def __init__(self, encoder, decoder, encoder_preproc, decoder_preproc):
super().__init__()
self.enc_preproc = registry.lookup('encoder', encoder['name']).Preproc(**encoder_preproc)
self.dec_preproc = registry.lookup('decoder', decoder['name']).Preproc(**decoder_preproc)
def validate_item(self, item, section):
(enc_result, enc_info) = self.enc_preproc.validate_item(item, section)
(dec_result, dec_info) = self.dec_preproc.validate_item(item, section)
return ((enc_result and dec_result), (enc_info, dec_info))
def add_item(self, item, section, validation_info):
(enc_info, dec_info) = validation_info
self.enc_preproc.add_item(item, section, enc_info)
self.dec_preproc.add_item(item, section, dec_info)
def clear_items(self):
self.enc_preproc.clear_items()
self.dec_preproc.clear_items()
def save(self):
self.enc_preproc.save()
self.dec_preproc.save()
def load(self):
self.enc_preproc.load()
self.dec_preproc.load()
def dataset(self, section):
return ZippedDataset(self.enc_preproc.dataset(section), self.dec_preproc.dataset(section))
def __init__(self, preproc, device, encoder, decoder):
super().__init__()
self.preproc = preproc
self.encoder = registry.construct('encoder', encoder, device=device, preproc=preproc.enc_preproc)
self.decoder = registry.construct('decoder', decoder, device=device, preproc=preproc.dec_preproc)
self.decoder.visualize_flag = False
if getattr(self.encoder, 'batched'):
self.compute_loss = self._compute_loss_enc_batched
else:
self.compute_loss = self._compute_loss_unbatched
def _compute_loss_enc_batched(self, batch, debug=False):
losses = []
enc_states = self.encoder([enc_input for (enc_input, dec_output) in batch])
for (enc_state, (enc_input, dec_output)) in zip(enc_states, batch):
loss = self.decoder.compute_loss(enc_input, dec_output, enc_state, debug)
losses.append(loss)
if debug:
return losses
else:
return torch.mean(torch.stack(losses, dim=0), dim=0)
def _compute_loss_enc_batched2(self, batch, debug=False):
losses = []
for (enc_input, dec_output) in batch:
(enc_state,) = self.encoder([enc_input])
loss = self.decoder.compute_loss(enc_input, dec_output, enc_state, debug)
losses.append(loss)
if debug:
return losses
else:
return torch.mean(torch.stack(losses, dim=0), dim=0)
def _compute_loss_unbatched(self, batch, debug=False):
losses = []
for (enc_input, dec_output) in batch:
enc_state = self.encoder(enc_input)
loss = self.decoder.compute_loss(enc_input, dec_output, enc_state, debug)
losses.append(loss)
if debug:
return losses
else:
return torch.mean(torch.stack(losses, dim=0), dim=0)
def eval_on_batch(self, batch):
mean_loss = self.compute_loss(batch).item()
batch_size = len(batch)
result = {'loss': (mean_loss * batch_size), 'total': batch_size}
return result
def begin_inference(self, orig_item, preproc_item):
(enc_input, _) = preproc_item
        if self.decoder.visualize_flag:
print('question:')
print(enc_input['question'])
print('columns:')
print(enc_input['columns'])
print('tables:')
print(enc_input['tables'])
if getattr(self.encoder, 'batched'):
(enc_state,) = self.encoder([enc_input])
else:
enc_state = self.encoder(enc_input)
return self.decoder.begin_inference(enc_state, orig_item)
|
class IdiomPreproc(abstract_preproc.AbstractPreproc):
def __init__(self, grammar, save_path, censor_pointers):
self.save_path = save_path
self.censor_pointers = censor_pointers
self.grammar = registry.construct('grammar', grammar)
self.ast_wrapper = self.grammar.ast_wrapper
self.items = collections.defaultdict(list)
def validate_item(self, item, section):
parsed = self.grammar.parse(item.code, section)
if parsed:
self.ast_wrapper.verify_ast(parsed)
return (True, parsed)
return ((section != 'train'), None)
def add_item(self, item, section, validation_info):
converted = AstConverter(self.grammar, self.censor_pointers).convert(validation_info)
self.items[section].append({'text': item.text, 'ast': converted, 'orig': item.orig})
def clear_items(self):
self.items.clear()
def save(self):
os.makedirs(self.save_path, exist_ok=True)
for section in self.items:
with open(os.path.join(self.save_path, '{}.jsonl'.format(section)), 'w') as f:
for item in self.items[section]:
f.write((json.dumps(item) + '\n'))
expected_children = {'Null': [], 'End': []}
field_name_nodes = []
binarizers = []
literals = []
single_child = []
for (name, type_) in self.ast_wrapper.singular_types.items():
expected_children[name] = ['{}-{}'.format(name, field.name) for field in type_.fields]
field_name_nodes.extend(('{}-{}'.format(name, field.name) for field in type_.fields))
for field in type_.fields:
if (len(type_.fields) == 1):
field_name = name
else:
field_name = '{}-{}'.format(name, field.name)
if field.seq:
binarizers.append(field_name)
else:
single_child.append(field_name)
if (field.type in {'identifier', 'int', 'string', 'bytes', 'object', 'singleton'}):
literals.append(field_name)
if (field.type in self.grammar.pointers):
literals.append(field_name)
with open(os.path.join(self.save_path, 'grammar.json'), 'w') as f:
json.dump({'expected_children': expected_children, 'field_name_nodes': field_name_nodes, 'binarizers': binarizers, 'literals': literals, 'single_child': single_child}, f, indent=2, sort_keys=True)
def load(self):
raise NotImplementedError
def dataset(self, section):
raise NotImplementedError
|
class AstConverter():
def __init__(self, grammar, censor_pointers):
self.grammar = grammar
self.ast_wrapper = grammar.ast_wrapper
self.symbols = {}
self.split_constants = False
self.preserve_terminal_types = True
self.censor_pointers = censor_pointers
def convert(self, node):
if (not isinstance(node, dict)):
if (self.split_constants and isinstance(node, str)):
return [TreeNode(piece, [], []) for piece in node.split(' ')]
if self.preserve_terminal_types:
return TreeNode(node, [], [])
return TreeNode(repr(node), [], [])
node_type = node['_type']
children = []
fields_for_type = self.ast_wrapper.singular_types[node_type].fields
for field in fields_for_type:
field_node = node.get(field.name)
if (field.type in self.grammar.pointers):
ref_getter = functools.partial(self.pointer_ref_getter, field.type)
else:
ref_getter = (lambda value: [])
if (len(fields_for_type) == 1):
field_tree_node_name = node_type
else:
field_tree_node_name = '{}-{}'.format(node_type, field.name)
if field.seq:
field_tree_node = self.make_binarized_list(ref_getter, field_tree_node_name, field.type, (field_node or []))
else:
if (field.opt and (field_node is None)):
child = TreeNode('Null', [], [])
refs = []
else:
if (self.censor_pointers and (field.type in self.grammar.pointers)):
child = None
else:
child = self.convert(field_node)
refs = ref_getter(field_node)
if (child is None):
child_list = []
elif isinstance(child, list):
child_list = child
else:
child_list = [child]
field_tree_node = TreeNode(field_tree_node_name, refs, child_list)
children.append(field_tree_node)
if (len(children) == 1):
return children[0]
else:
return TreeNode(node_type, [], children)
def pointer_ref_getter(self, pointer_type, pointer_value):
symbol = (pointer_type, pointer_value)
symbol_id = self.symbols.get(symbol)
if (symbol_id is None):
symbol_id = self.symbols[symbol] = len(self.symbols)
return [symbol_id]
def make_binarized_list(self, ref_getter, node_name, elem_type, elems):
root = tree_node = TreeNode(node_name, [], [])
for elem in elems:
new_tree_node = TreeNode(node_name, [], [])
if (self.censor_pointers and (elem_type in self.grammar.pointers)):
raise NotImplementedError
else:
elem_tree_node = self.convert(elem)
tree_node.children.extend([elem_tree_node, new_tree_node])
tree_node.refs.extend(ref_getter(elem))
tree_node = new_tree_node
tree_node.children.append(TreeNode('End', [], []))
return root
|
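# Hand-built illustration of the cons-cell layout that make_binarized_list
# produces for two elements (constructed directly here, since building an
# AstConverter requires a registered grammar): every cell repeats the field node
# name, holds one converted element plus the next cell, and the chain ends in an
# 'End' node.
def _example_binarized_list_shape():
    end = TreeNode('End', [], [])
    tail = TreeNode('body', [], [end])
    middle = TreeNode('body', [], [TreeNode('second_stmt', [], []), tail])
    root = TreeNode('body', [], [TreeNode('first_stmt', [], []), middle])
    assert root.children[-1] is middle
    assert tail.children[0] is end
|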
@registry.register('model', 'IdiomMiner')
class IdiomMinerModel():
'A dummy model for housing IdiomPreproc.'
Preproc = IdiomPreproc
|
class RecurrentDropoutLSTMCell(RNNCellBase):
def __init__(self, input_size, hidden_size, dropout=0.0):
super(RecurrentDropoutLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.dropout = dropout
self.W_i = Parameter(torch.Tensor(hidden_size, input_size))
self.U_i = Parameter(torch.Tensor(hidden_size, hidden_size))
self.W_f = Parameter(torch.Tensor(hidden_size, input_size))
self.U_f = Parameter(torch.Tensor(hidden_size, hidden_size))
self.W_c = Parameter(torch.Tensor(hidden_size, input_size))
self.U_c = Parameter(torch.Tensor(hidden_size, hidden_size))
self.W_o = Parameter(torch.Tensor(hidden_size, input_size))
self.U_o = Parameter(torch.Tensor(hidden_size, hidden_size))
self.bias_ih = Parameter(torch.Tensor((4 * hidden_size)))
self.bias_hh = Parameter(torch.Tensor((4 * hidden_size)))
self._input_dropout_mask = self._h_dropout_mask = None
self.reset_parameters()
def reset_parameters(self):
init.orthogonal_(self.W_i)
init.orthogonal_(self.U_i)
init.orthogonal_(self.W_f)
init.orthogonal_(self.U_f)
init.orthogonal_(self.W_c)
init.orthogonal_(self.U_c)
init.orthogonal_(self.W_o)
init.orthogonal_(self.U_o)
self.bias_ih.data.fill_(0.0)
self.bias_ih.data[self.hidden_size:(2 * self.hidden_size)].fill_(1.0)
self.bias_hh.data.fill_(0.0)
def set_dropout_masks(self, batch_size):
if self.dropout:
if self.training:
new_tensor = self.W_i.data.new
self._input_dropout_mask = Variable(torch.bernoulli(new_tensor(4, batch_size, self.input_size).fill_((1 - self.dropout))), requires_grad=False)
self._h_dropout_mask = Variable(torch.bernoulli(new_tensor(4, batch_size, self.hidden_size).fill_((1 - self.dropout))), requires_grad=False)
else:
self._input_dropout_mask = self._h_dropout_mask = ([(1.0 - self.dropout)] * 4)
else:
self._input_dropout_mask = self._h_dropout_mask = ([1.0] * 4)
def forward(self, input, hidden_state):
def get_mask_slice(mask, idx):
if isinstance(mask, list):
return mask[idx]
else:
return mask[idx][:input.size(0)]
(h_tm1, c_tm1) = hidden_state
xi_t = F.linear((input * get_mask_slice(self._input_dropout_mask, 0)), self.W_i)
xf_t = F.linear((input * get_mask_slice(self._input_dropout_mask, 1)), self.W_f)
xc_t = F.linear((input * get_mask_slice(self._input_dropout_mask, 2)), self.W_c)
xo_t = F.linear((input * get_mask_slice(self._input_dropout_mask, 3)), self.W_o)
hi_t = F.linear((h_tm1 * get_mask_slice(self._h_dropout_mask, 0)), self.U_i)
hf_t = F.linear((h_tm1 * get_mask_slice(self._h_dropout_mask, 1)), self.U_f)
hc_t = F.linear((h_tm1 * get_mask_slice(self._h_dropout_mask, 2)), self.U_c)
ho_t = F.linear((h_tm1 * get_mask_slice(self._h_dropout_mask, 3)), self.U_o)
if input.is_cuda:
igates = torch.cat([xi_t, xf_t, xc_t, xo_t], dim=(- 1))
hgates = torch.cat([hi_t, hf_t, hc_t, ho_t], dim=(- 1))
state = fusedBackend.LSTMFused.apply
return state(igates, hgates, c_tm1, self.bias_ih, self.bias_hh)
else:
i_t = torch.sigmoid((((xi_t + self.bias_ih[:self.hidden_size]) + hi_t) + self.bias_hh[:self.hidden_size]))
f_t = torch.sigmoid((((xf_t + self.bias_ih[self.hidden_size:(2 * self.hidden_size)]) + hf_t) + self.bias_hh[self.hidden_size:(2 * self.hidden_size)]))
c_t = ((f_t * c_tm1) + (i_t * torch.tanh((((xc_t + self.bias_ih[(2 * self.hidden_size):(3 * self.hidden_size)]) + hc_t) + self.bias_hh[(2 * self.hidden_size):(3 * self.hidden_size)]))))
o_t = torch.sigmoid((((xo_t + self.bias_ih[(3 * self.hidden_size):(4 * self.hidden_size)]) + ho_t) + self.bias_hh[(3 * self.hidden_size):(4 * self.hidden_size)]))
h_t = (o_t * torch.tanh(c_t))
return (h_t, c_t)
|
class ParentFeedingLSTMCell(RNNCellBase):
def __init__(self, input_size, hidden_size):
super(ParentFeedingLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.W_i = Parameter(torch.Tensor(hidden_size, input_size))
self.U_i = Parameter(torch.Tensor(hidden_size, hidden_size))
self.U_i_p = Parameter(torch.Tensor(hidden_size, hidden_size))
self.b_i = Parameter(torch.Tensor(hidden_size))
self.W_f = Parameter(torch.Tensor(hidden_size, input_size))
self.U_f = Parameter(torch.Tensor(hidden_size, hidden_size))
self.U_f_p = Parameter(torch.Tensor(hidden_size, hidden_size))
self.b_f = Parameter(torch.Tensor(hidden_size))
self.b_f_p = Parameter(torch.Tensor(hidden_size))
self.W_c = Parameter(torch.Tensor(hidden_size, input_size))
self.U_c = Parameter(torch.Tensor(hidden_size, hidden_size))
self.U_c_p = Parameter(torch.Tensor(hidden_size, hidden_size))
self.b_c = Parameter(torch.Tensor(hidden_size))
self.W_o = Parameter(torch.Tensor(hidden_size, input_size))
self.U_o = Parameter(torch.Tensor(hidden_size, hidden_size))
self.U_o_p = Parameter(torch.Tensor(hidden_size, hidden_size))
self.b_o = Parameter(torch.Tensor(hidden_size))
self.reset_parameters()
def reset_parameters(self):
        init.orthogonal_(self.W_i)
        init.orthogonal_(self.U_i)
        init.orthogonal_(self.U_i_p)
        init.orthogonal_(self.W_f)
        init.orthogonal_(self.U_f)
        init.orthogonal_(self.U_f_p)
        init.orthogonal_(self.W_c)
        init.orthogonal_(self.U_c)
        init.orthogonal_(self.U_c_p)
        init.orthogonal_(self.W_o)
        init.orthogonal_(self.U_o)
        init.orthogonal_(self.U_o_p)
self.b_i.data.fill_(0.0)
self.b_c.data.fill_(0.0)
self.b_o.data.fill_(0.0)
self.b_f.data.fill_(1.0)
self.b_f_p.data.fill_(1.0)
def forward(self, input, hidden_states):
(h_tm1, c_tm1, h_tm1_p, c_tm1_p) = hidden_states
i_t = torch.sigmoid((((F.linear(input, self.W_i) + F.linear(h_tm1, self.U_i)) + F.linear(h_tm1_p, self.U_i_p)) + self.b_i))
xf_t = F.linear(input, self.W_f)
f_t = torch.sigmoid(((xf_t + F.linear(h_tm1, self.U_f)) + self.b_f))
f_t_p = torch.sigmoid(((xf_t + F.linear(h_tm1_p, self.U_f_p)) + self.b_f_p))
xc_t = (((F.linear(input, self.W_c) + F.linear(h_tm1, self.U_c)) + F.linear(h_tm1_p, self.U_c_p)) + self.b_c)
c_t = (((f_t * c_tm1) + (f_t_p * c_tm1_p)) + (i_t * torch.tanh(xc_t)))
o_t = torch.sigmoid((((F.linear(input, self.W_o) + F.linear(h_tm1, self.U_o)) + F.linear(h_tm1_p, self.U_o_p)) + self.b_o))
h_t = (o_t * torch.tanh(c_t))
return (h_t, c_t)
|
class LSTM(nn.Module):
def __init__(self, input_size, hidden_size, bidirectional=False, dropout=0.0, cell_factory=RecurrentDropoutLSTMCell):
super(LSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bidirectional = bidirectional
self.dropout = dropout
self.cell_factory = cell_factory
num_directions = (2 if bidirectional else 1)
self.lstm_cells = []
for direction in range(num_directions):
cell = cell_factory(input_size, hidden_size, dropout=dropout)
self.lstm_cells.append(cell)
suffix = ('_reverse' if (direction == 1) else '')
cell_name = 'cell{}'.format(suffix)
self.add_module(cell_name, cell)
def forward(self, input, hidden_state=None):
is_packed = isinstance(input, PackedSequence)
if is_packed:
(input, batch_sizes) = input
max_batch_size = batch_sizes[0]
else:
batch_sizes = None
max_batch_size = input.size(1)
for cell in self.lstm_cells:
cell.set_dropout_masks(max_batch_size)
if (hidden_state is None):
num_directions = (2 if self.bidirectional else 1)
hx = input.new_zeros(num_directions, max_batch_size, self.hidden_size, requires_grad=False)
hidden_state = (hx, hx)
rec_factory = (variable_recurrent_factory if is_packed else Recurrent)
if self.bidirectional:
layer = (rec_factory(self.cell), rec_factory(self.cell_reverse, reverse=True))
else:
layer = (rec_factory(self.cell),)
func = StackedRNN(layer, num_layers=1, lstm=True, dropout=0.0, train=self.training)
(next_hidden, output) = func(input, hidden_state, weight=[[], []], batch_sizes=batch_sizes)
if is_packed:
output = PackedSequence(output, batch_sizes)
return (output, next_hidden)
|
@attr.s
class NL2CodeEncoderState():
state = attr.ib()
memory = attr.ib()
words = attr.ib()
def find_word_occurrences(self, word):
return [i for (i, w) in enumerate(self.words) if (w == word)]
|
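# The encoder state above carries the final LSTM state, the per-token memory and
# the original question tokens; find_word_occurrences returns every index at
# which a given word appears (a sketch with placeholder tensors omitted).
def _example_find_word_occurrences():
    state = NL2CodeEncoderState(state=None, memory=None, words=['how', 'many', 'heads', 'how'])
    assert state.find_word_occurrences('how') == [0, 3]
|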
@registry.register('encoder', 'NL2Code')
class NL2CodeEncoder(torch.nn.Module):
batched = False
class Preproc(abstract_preproc.AbstractPreproc):
def __init__(self, save_path, min_freq=3, max_count=5000):
self.vocab_path = os.path.join(save_path, 'enc_vocab.json')
self.data_dir = os.path.join(save_path, 'enc')
self.vocab_builder = vocab.VocabBuilder(min_freq, max_count)
self.init_items()
self.vocab = None
def init_items(self):
self.texts = {'train': [], 'val': [], 'test': []}
def validate_item(self, item, section):
return (True, None)
def add_item(self, item, section, validation_info):
if (section == 'train'):
for token in item.text:
self.vocab_builder.add_word(token)
self.texts[section].append(item.text)
def clear_items(self):
self.init_items()
def preprocess_item(self, item, validation_info):
return item.text
def save(self):
os.makedirs(self.data_dir, exist_ok=True)
self.vocab = self.vocab_builder.finish()
self.vocab.save(self.vocab_path)
for (section, texts) in self.texts.items():
with open(os.path.join(self.data_dir, (section + '.jsonl')), 'w') as f:
for text in texts:
f.write((json.dumps(text) + '\n'))
def load(self):
self.vocab = vocab.Vocab.load(self.vocab_path)
def dataset(self, section):
return [json.loads(line) for line in open(os.path.join(self.data_dir, (section + '.jsonl')))]
def __init__(self, device, preproc, word_emb_size=128, recurrent_size=256, dropout=0.0):
super().__init__()
self._device = device
self.desc_vocab = preproc.vocab
self.word_emb_size = word_emb_size
self.recurrent_size = recurrent_size
assert ((self.recurrent_size % 2) == 0)
self.desc_embedding = torch.nn.Embedding(num_embeddings=len(self.desc_vocab), embedding_dim=self.word_emb_size)
self.encoder = lstm.LSTM(input_size=self.word_emb_size, hidden_size=(self.recurrent_size // 2), bidirectional=True, dropout=dropout)
def forward(self, desc_words):
desc_indices = torch.tensor(self.desc_vocab.indices(desc_words), device=self._device).unsqueeze(0)
desc_emb = self.desc_embedding(desc_indices)
desc_emb = desc_emb.transpose(0, 1)
(outputs, state) = self.encoder(desc_emb)
return NL2CodeEncoderState(state=state, memory=outputs.transpose(0, 1), words=desc_words)
|
class TemplateTraversalType(enum.Enum):
DEFAULT = 0
CHILDREN_APPLY = 1
LIST_LENGTH_APPLY = 2
|
@attr.s(frozen=True)
class TemplateTraversalState():
node = attr.ib()
parent_field_type = attr.ib()
type = attr.ib(default=TemplateTraversalType.DEFAULT)
|
@attr.s(frozen=True)
class TemplateActionProvider():
model = attr.ib()
queue = attr.ib()
buffer = attr.ib(factory=pyrsistent.pdeque)
last_returned = attr.ib(default=None)
@classmethod
def build(cls, model, tree, parent_field_type):
return cls(model, pyrsistent.pdeque([TemplateTraversalState(tree, parent_field_type)]))
@property
def finished(self):
return ((not self.queue) and (not self.buffer))
def step(self, last_choice):
queue = self.queue
buffer = self.buffer
def rv():
return (buffer.left, TemplateActionProvider(self.model, queue, buffer.popleft(), buffer.left))
if buffer:
if (not isinstance(self.last_returned, ast_util.HoleValuePlaceholder)):
assert ((last_choice == self.last_returned) or (isinstance(self.last_returned, tuple) and (last_choice in self.last_returned)))
return rv()
to_return = False
while queue:
item = queue.right
node = item.node
parent_field_type = item.parent_field_type
queue = queue.pop()
to_return = False
result_finished = False
new_last_choice = None
if ((item.type == TemplateTraversalType.DEFAULT) and isinstance(node, (list, tuple))):
hvps = [elem for elem in node if isinstance(elem, ast_util.HoleValuePlaceholder)]
num_seq_hvps = sum((hvp.is_seq for hvp in hvps))
assert (num_seq_hvps in (0, 1))
node_type = (parent_field_type + '*')
if num_seq_hvps:
allowed_lengths = [l for l in self.model.preproc.seq_lengths[node_type] if (l >= (len(node) - 1))]
rule_indices = tuple((self.model.rules_index[(node_type, length)] for length in allowed_lengths))
else:
rule = (node_type, len(node))
rule_indices = (self.model.rules_index[rule],)
if (len(rule_indices) == 1):
buffer = buffer.append(rule_indices[0])
new_last_choice = rule_indices[0]
else:
to_return = True
buffer = buffer.append(rule_indices)
queue = queue.append(TemplateTraversalState(type=TemplateTraversalType.LIST_LENGTH_APPLY, node=node, parent_field_type=parent_field_type))
result_finished = True
elif ((item.type == TemplateTraversalType.LIST_LENGTH_APPLY) and isinstance(node, (list, tuple))):
(list_type, num_children) = self.model.preproc.all_rules[last_choice]
assert (list_type == (parent_field_type + '*'))
assert (num_children > 0)
if (num_children < len(node)):
assert isinstance(node[(- 1)], ast_util.HoleValuePlaceholder)
assert node[(- 1)].is_seq
assert ((num_children + 1) == len(node))
node = node[:(- 1)]
elif (len(node) < num_children):
assert isinstance(node[(- 1)], ast_util.HoleValuePlaceholder)
assert node[(- 1)].is_seq
node = (node + ([node[(- 1)]] * (num_children - len(node))))
if (self.model.preproc.use_seq_elem_rules and (parent_field_type in self.model.ast_wrapper.sum_types)):
parent_field_type += '_seq_elem'
for (i, elem) in reversed(list(enumerate(node))):
queue = queue.append(TemplateTraversalState(node=elem, parent_field_type=parent_field_type))
result_finished = True
elif isinstance(node, ast_util.HoleValuePlaceholder):
buffer = buffer.append(node)
result_finished = True
elif (parent_field_type in self.model.preproc.grammar.pointers):
assert isinstance(node, int)
buffer = buffer.append(node)
result_finished = True
elif (parent_field_type in self.model.ast_wrapper.primitive_types):
field_type = type(node).__name__
field_value_split = (self.model.preproc.grammar.tokenize_field_value(node) + [vocab.EOS])
buffer = buffer.extend(field_value_split)
result_finished = True
if result_finished:
if to_return:
return rv()
else:
last_choice = new_last_choice
continue
type_info = self.model.ast_wrapper.singular_types[node['_type']]
if (item.type == TemplateTraversalType.CHILDREN_APPLY):
(node_type, children_presence) = self.model.preproc.all_rules[last_choice]
assert (node_type == node['_type'])
for (field_info, present) in reversed(list(zip(type_info.fields, children_presence))):
if present:
assert (field_info.name in node)
elif (field_info.name not in node):
continue
else:
field_value = node[field_info.name]
if isinstance(field_value, ast_util.HoleValuePlaceholder):
assert field_value.is_opt
elif isinstance(field_value, list):
assert (len(field_value) == 1)
assert isinstance(field_value[0], ast_util.HoleValuePlaceholder)
assert field_value[0].is_seq
else:
raise ValueError(field_value)
continue
field_value = node[field_info.name]
queue = queue.append(TemplateTraversalState(node=field_value, parent_field_type=field_info.type))
last_choice = new_last_choice
continue
if (parent_field_type in self.model.preproc.sum_type_constructors):
rule = (parent_field_type, type_info.name)
rule_idx = self.model.rules_index[rule]
assert (not node.get('_extra_types', ()))
buffer = buffer.append(rule_idx)
if type_info.fields:
present = decoder.get_field_presence_info(self.model.ast_wrapper, node, type_info.fields)
hvp_present = False
presence_values = []
for (i, field_info) in enumerate(type_info.fields):
if (field_info.name not in node):
presence_values.append((False,))
continue
field_value = node[field_info.name]
if (isinstance(field_value, ast_util.HoleValuePlaceholder) or (isinstance(field_value, list) and (len(field_value) == 1) and isinstance(field_value[0], ast_util.HoleValuePlaceholder))):
presence = tuple(set((info[i] for info in self.model.preproc.field_presence_infos[node['_type']])))
presence_values.append(presence)
hvp_present = True
else:
presence_values.append((present[i],))
if hvp_present:
rule_indices = tuple((rule_idx for rule_idx in (self.model.rules_index.get((node['_type'], p)) for p in itertools.product(*presence_values)) if (rule_idx is not None)))
if (len(rule_indices) == 1):
buffer = buffer.append(rule_indices[0])
new_last_choice = rule_indices[0]
else:
to_return = True
buffer = buffer.append(rule_indices)
else:
rule = (node['_type'], tuple(present))
rule_idx = self.model.rules_index[rule]
buffer = buffer.append(rule_idx)
new_last_choice = rule_idx
queue = queue.append(TemplateTraversalState(type=TemplateTraversalType.CHILDREN_APPLY, node=node, parent_field_type=parent_field_type))
if to_return:
return rv()
else:
last_choice = new_last_choice
continue
return rv()
|
@attr.s
class SpiderEncoderState():
state = attr.ib()
memory = attr.ib()
question_memory = attr.ib()
schema_memory = attr.ib()
words = attr.ib()
pointer_memories = attr.ib()
pointer_maps = attr.ib()
def find_word_occurrences(self, word):
return [i for (i, w) in enumerate(self.words) if (w == word)]
|
@attr.s
class PreprocessedSchema():
column_names = attr.ib(factory=list)
table_names = attr.ib(factory=list)
table_bounds = attr.ib(factory=list)
column_to_table = attr.ib(factory=dict)
table_to_columns = attr.ib(factory=dict)
foreign_keys = attr.ib(factory=dict)
foreign_keys_tables = attr.ib(factory=(lambda : collections.defaultdict(set)))
primary_keys = attr.ib(factory=list)
|
class SpiderEncoderV2Preproc(abstract_preproc.AbstractPreproc):
def __init__(self, save_path, min_freq=3, max_count=5000, include_table_name_in_column=True, word_emb=None, count_tokens_in_word_emb_for_vocab=False):
if (word_emb is None):
self.word_emb = None
else:
self.word_emb = registry.construct('word_emb', word_emb)
self.data_dir = os.path.join(save_path, 'enc')
self.include_table_name_in_column = include_table_name_in_column
self.count_tokens_in_word_emb_for_vocab = count_tokens_in_word_emb_for_vocab
self.init_texts()
self.vocab_builder = vocab.VocabBuilder(min_freq, max_count)
self.vocab_path = os.path.join(save_path, 'enc_vocab.json')
self.vocab = None
self.counted_db_ids = set()
self.preprocessed_schemas = {}
def init_texts(self):
self.texts = {'train': [], 'val': [], 'test': []}
def validate_item(self, item, section):
return (True, None)
def add_item(self, item, section, validation_info):
preprocessed = self.preprocess_item(item, validation_info)
self.texts[section].append(preprocessed)
if (section == 'train'):
if (item.schema.db_id in self.counted_db_ids):
to_count = preprocessed['question']
else:
self.counted_db_ids.add(item.schema.db_id)
to_count = itertools.chain(preprocessed['question'], *preprocessed['columns'], *preprocessed['tables'])
for token in to_count:
count_token = ((self.word_emb is None) or self.count_tokens_in_word_emb_for_vocab or (self.word_emb.lookup(token) is None))
if count_token:
self.vocab_builder.add_word(token)
def clear_items(self):
self.init_texts()
def preprocess_item(self, item, validation_info):
if self.word_emb:
question = self.word_emb.tokenize(item.orig['question'])
else:
question = item.text
preproc_schema = self._preprocess_schema(item.schema)
return {'question': question, 'db_id': item.schema.db_id, 'columns': preproc_schema.column_names, 'tables': preproc_schema.table_names, 'table_bounds': preproc_schema.table_bounds, 'column_to_table': preproc_schema.column_to_table, 'table_to_columns': preproc_schema.table_to_columns, 'foreign_keys': preproc_schema.foreign_keys, 'foreign_keys_tables': preproc_schema.foreign_keys_tables, 'primary_keys': preproc_schema.primary_keys}
def _preprocess_schema(self, schema):
if (schema.db_id in self.preprocessed_schemas):
return self.preprocessed_schemas[schema.db_id]
result = self._preprocess_schema_uncached(schema)
self.preprocessed_schemas[schema.db_id] = result
return result
def _preprocess_schema_uncached(self, schema):
r = PreprocessedSchema()
last_table_id = None
for (i, column) in enumerate(schema.columns):
column_name = (['<type: {}>'.format(column.type)] + self._tokenize(column.name, column.unsplit_name))
if self.include_table_name_in_column:
if (column.table is None):
table_name = ['<any-table>']
else:
table_name = self._tokenize(column.table.name, column.table.unsplit_name)
column_name += (['<table-sep>'] + table_name)
r.column_names.append(column_name)
table_id = (None if (column.table is None) else column.table.id)
r.column_to_table[str(i)] = table_id
if (table_id is not None):
columns = r.table_to_columns.setdefault(str(table_id), [])
columns.append(i)
if (last_table_id != table_id):
r.table_bounds.append(i)
last_table_id = table_id
if (column.foreign_key_for is not None):
r.foreign_keys[str(column.id)] = column.foreign_key_for.id
r.foreign_keys_tables[str(column.table.id)].add(column.foreign_key_for.table.id)
r.table_bounds.append(len(schema.columns))
assert (len(r.table_bounds) == (len(schema.tables) + 1))
for (i, table) in enumerate(schema.tables):
r.table_names.append(self._tokenize(table.name, table.unsplit_name))
r.foreign_keys_tables = serialization.to_dict_with_sorted_values(r.foreign_keys_tables)
        r.primary_keys = [column.id for table in schema.tables for column in table.primary_keys]
return r
def _tokenize(self, presplit, unsplit):
if self.word_emb:
return self.word_emb.tokenize(unsplit)
return presplit
def save(self):
os.makedirs(self.data_dir, exist_ok=True)
self.vocab = self.vocab_builder.finish()
self.vocab.save(self.vocab_path)
for (section, texts) in self.texts.items():
with open(os.path.join(self.data_dir, (section + '.jsonl')), 'w') as f:
for text in texts:
f.write((json.dumps(text) + '\n'))
def load(self):
self.vocab = vocab.Vocab.load(self.vocab_path)
def dataset(self, section):
return [json.loads(line) for line in open(os.path.join(self.data_dir, (section + '.jsonl')))]
|
@registry.register('encoder', 'spiderv2')
class SpiderEncoderV2(torch.nn.Module):
batched = True
Preproc = SpiderEncoderV2Preproc
def __init__(self, device, preproc, word_emb_size=128, recurrent_size=256, dropout=0.0, question_encoder=('emb', 'bilstm'), column_encoder=('emb', 'bilstm'), table_encoder=('emb', 'bilstm'), update_config={}, include_in_memory=('question', 'column', 'table'), batch_encs_update=True):
super().__init__()
self._device = device
self.preproc = preproc
self.vocab = preproc.vocab
self.word_emb_size = word_emb_size
self.recurrent_size = recurrent_size
assert ((self.recurrent_size % 2) == 0)
self.include_in_memory = set(include_in_memory)
self.dropout = dropout
self.question_encoder = self._build_modules(question_encoder)
self.column_encoder = self._build_modules(column_encoder)
self.table_encoder = self._build_modules(table_encoder)
update_modules = {'relational_transformer': spider_enc_modules.RelationalTransformerUpdate, 'none': spider_enc_modules.NoOpUpdate}
self.encs_update = registry.instantiate(update_modules[update_config['name']], update_config, device=self._device, hidden_size=recurrent_size)
self.batch_encs_update = batch_encs_update
def _build_modules(self, module_types):
module_builder = {'emb': (lambda : spider_enc_modules.LookupEmbeddings(self._device, self.vocab, self.preproc.word_emb, self.word_emb_size)), 'linear': (lambda : spider_enc_modules.EmbLinear(input_size=self.word_emb_size, output_size=self.word_emb_size)), 'bilstm': (lambda : spider_enc_modules.BiLSTM(input_size=self.word_emb_size, output_size=self.recurrent_size, dropout=self.dropout, summarize=False)), 'bilstm-native': (lambda : spider_enc_modules.BiLSTM(input_size=self.word_emb_size, output_size=self.recurrent_size, dropout=self.dropout, summarize=False, use_native=True)), 'bilstm-summarize': (lambda : spider_enc_modules.BiLSTM(input_size=self.word_emb_size, output_size=self.recurrent_size, dropout=self.dropout, summarize=True)), 'bilstm-native-summarize': (lambda : spider_enc_modules.BiLSTM(input_size=self.word_emb_size, output_size=self.recurrent_size, dropout=self.dropout, summarize=True, use_native=True))}
modules = []
for module_type in module_types:
modules.append(module_builder[module_type]())
return torch.nn.Sequential(*modules)
def forward_unbatched(self, desc):
(q_enc, (_, _)) = self.question_encoder([desc['question']])
(c_enc, c_boundaries) = self.column_encoder(desc['columns'])
column_pointer_maps = {i: list(range(left, right)) for (i, (left, right)) in enumerate(zip(c_boundaries, c_boundaries[1:]))}
(t_enc, t_boundaries) = self.table_encoder(desc['tables'])
c_enc_length = c_enc.shape[0]
table_pointer_maps = {i: ([idx for col in desc['table_to_columns'][str(i)] for idx in column_pointer_maps[col]] + list(range((left + c_enc_length), (right + c_enc_length)))) for (i, (left, right)) in enumerate(zip(t_boundaries, t_boundaries[1:]))}
(q_enc_new, c_enc_new, t_enc_new) = self.encs_update(desc, q_enc, c_enc, c_boundaries, t_enc, t_boundaries)
memory = []
if ('question' in self.include_in_memory):
memory.append(q_enc_new)
if ('column' in self.include_in_memory):
memory.append(c_enc_new)
if ('table' in self.include_in_memory):
memory.append(t_enc_new)
memory = torch.cat(memory, dim=1)
        return SpiderEncoderState(state=None, memory=memory, question_memory=q_enc_new, schema_memory=torch.cat((c_enc_new, t_enc_new), dim=1), words=desc['question'], pointer_memories={'column': c_enc_new, 'table': torch.cat((c_enc_new, t_enc_new), dim=1)}, pointer_maps={'column': column_pointer_maps, 'table': table_pointer_maps})
def forward(self, descs):
(q_enc, _) = self.question_encoder([[desc['question']] for desc in descs])
(c_enc, c_boundaries) = self.column_encoder([desc['columns'] for desc in descs])
column_pointer_maps = [{i: list(range(left, right)) for (i, (left, right)) in enumerate(zip(c_boundaries_for_item, c_boundaries_for_item[1:]))} for (batch_idx, c_boundaries_for_item) in enumerate(c_boundaries)]
(t_enc, t_boundaries) = self.table_encoder([desc['tables'] for desc in descs])
c_enc_lengths = list(c_enc.orig_lengths())
table_pointer_maps = [{i: ([idx for col in desc['table_to_columns'][str(i)] for idx in column_pointer_maps[batch_idx][col]] + list(range((left + c_enc_lengths[batch_idx]), (right + c_enc_lengths[batch_idx])))) for (i, (left, right)) in enumerate(zip(t_boundaries_for_item, t_boundaries_for_item[1:]))} for (batch_idx, (desc, t_boundaries_for_item)) in enumerate(zip(descs, t_boundaries))]
if self.batch_encs_update:
(q_enc_new, c_enc_new, t_enc_new) = self.encs_update(descs, q_enc, c_enc, c_boundaries, t_enc, t_boundaries)
result = []
for (batch_idx, desc) in enumerate(descs):
if self.batch_encs_update:
q_enc_new_item = q_enc_new.select(batch_idx).unsqueeze(0)
c_enc_new_item = c_enc_new.select(batch_idx).unsqueeze(0)
t_enc_new_item = t_enc_new.select(batch_idx).unsqueeze(0)
else:
(q_enc_new_item, c_enc_new_item, t_enc_new_item) = self.encs_update.forward_unbatched(desc, q_enc.select(batch_idx).unsqueeze(1), c_enc.select(batch_idx).unsqueeze(1), c_boundaries[batch_idx], t_enc.select(batch_idx).unsqueeze(1), t_boundaries[batch_idx])
memory = []
if ('question' in self.include_in_memory):
memory.append(q_enc_new_item)
if ('column' in self.include_in_memory):
memory.append(c_enc_new_item)
if ('table' in self.include_in_memory):
memory.append(t_enc_new_item)
memory = torch.cat(memory, dim=1)
result.append(SpiderEncoderState(state=None, memory=memory, question_memory=q_enc_new_item, schema_memory=torch.cat((c_enc_new_item, t_enc_new_item), dim=1), words=desc['question'], pointer_memories={'column': c_enc_new_item, 'table': torch.cat((c_enc_new_item, t_enc_new_item), dim=1)}, pointer_maps={'column': column_pointer_maps[batch_idx], 'table': table_pointer_maps[batch_idx]}))
return result
|
def relative_attention_logits(query, key, relation):
qk_matmul = torch.matmul(query, key.transpose((- 2), (- 1)))
q_t = query.permute(0, 2, 1, 3)
r_t = relation.transpose((- 2), (- 1))
q_tr_t_matmul = torch.matmul(q_t, r_t)
q_tr_tmatmul_t = q_tr_t_matmul.permute(0, 2, 1, 3)
return ((qk_matmul + q_tr_tmatmul_t) / math.sqrt(query.shape[(- 1)]))
|
def relative_attention_values(weight, value, relation):
wv_matmul = torch.matmul(weight, value)
w_t = weight.permute(0, 2, 1, 3)
w_tr_matmul = torch.matmul(w_t, relation)
w_tr_matmul_t = w_tr_matmul.permute(0, 2, 1, 3)
return (wv_matmul + w_tr_matmul_t)
|
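# Shape walkthrough for the relation-aware attention helpers above (a sketch
# with random tensors): relation holds one d-dimensional vector per (i, j) pair
# and is shared across heads, so the logits come out as (batch, heads, seq, seq)
# and the weighted values come back as (batch, heads, seq, d).
def _example_relative_attention_shapes():
    import torch
    bsz, heads, seq, d = 2, 4, 5, 8
    query = torch.randn(bsz, heads, seq, d)
    key = torch.randn(bsz, heads, seq, d)
    value = torch.randn(bsz, heads, seq, d)
    relation = torch.randn(bsz, seq, seq, d)
    logits = relative_attention_logits(query, key, relation)
    assert logits.shape == (bsz, heads, seq, seq)
    weights = torch.softmax(logits, dim=-1)
    out = relative_attention_values(weights, value, relation)
    assert out.shape == (bsz, heads, seq, d)
|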
def clones(module_fn, N):
return nn.ModuleList([module_fn() for _ in range(N)])
|
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size((- 1))
scores = (torch.matmul(query, key.transpose((- 2), (- 1))) / math.sqrt(d_k))
if (mask is not None):
scores = scores.masked_fill((mask == 0), (- 1000000000.0))
p_attn = F.softmax(scores, dim=(- 1))
if (dropout is not None):
p_attn = dropout(p_attn)
return (torch.matmul(p_attn, value), scores.squeeze(1).squeeze(1))
|
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
'Take in model size and number of heads.'
super(MultiHeadedAttention, self).__init__()
assert ((d_model % h) == 0)
self.d_k = (d_model // h)
self.h = h
self.linears = clones((lambda : nn.Linear(d_model, d_model)), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
'Implements Figure 2'
if (mask is not None):
mask = mask.unsqueeze(1)
nbatches = query.size(0)
(query, key, value) = [l(x).view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2) for (l, x) in zip(self.linears, (query, key, value))]
(x, self.attn) = attention(query, key, value, mask=mask, dropout=self.dropout)
x = x.transpose(1, 2).contiguous().view(nbatches, (- 1), (self.h * self.d_k))
if (query.dim() == 3):
x = x.squeeze(1)
return self.linears[(- 1)](x)
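# Hypothetical usage sketch (illustrative, not from the original repo): the first
# three linear layers project query/key/value, the per-head scaled dot-product
# `attention` above is applied, and the final linear layer re-projects the
# concatenated heads. The batch/sequence/model sizes below are arbitrary.
def _example_multi_headed_attention():
    import torch
    mha = MultiHeadedAttention(h=8, d_model=256, dropout=0.1)
    x = torch.randn(4, 10, 256)      # [batch, seq, d_model]
    out = mha(x, x, x, mask=None)    # self-attention
    assert out.shape == (4, 10, 256)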
|
def attention_with_relations(query, key, value, relation_k, relation_v, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size((- 1))
scores = relative_attention_logits(query, key, relation_k)
if (mask is not None):
scores = scores.masked_fill((mask == 0), (- 1000000000.0))
p_attn = F.softmax(scores, dim=(- 1))
if (dropout is not None):
p_attn = dropout(p_attn)
return (relative_attention_values(p_attn, value, relation_v), p_attn)
|
class MultiHeadedAttentionWithRelations(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
'Take in model size and number of heads.'
super(MultiHeadedAttentionWithRelations, self).__init__()
assert ((d_model % h) == 0)
self.d_k = (d_model // h)
self.h = h
self.linears = clones((lambda : nn.Linear(d_model, d_model)), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, relation_k, relation_v, mask=None):
if (mask is not None):
mask = mask.unsqueeze(1)
nbatches = query.size(0)
(query, key, value) = [l(x).view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2) for (l, x) in zip(self.linears, (query, key, value))]
(x, self.attn) = attention_with_relations(query, key, value, relation_k, relation_v, mask=mask, dropout=self.dropout)
x = x.transpose(1, 2).contiguous().view(nbatches, (- 1), (self.h * self.d_k))
return self.linears[(- 1)](x)
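# Hypothetical usage sketch (illustrative only): the relation-aware variant takes,
# in addition to query/key/value, one d_k-dimensional relation embedding per pair
# of positions for keys and for values (relation_k, relation_v). EncoderLayer below
# produces these from integer relation ids; here they are just random tensors.
def _example_multi_headed_attention_with_relations():
    import torch
    h, d_model, seq_len = 8, 256, 10
    mha = MultiHeadedAttentionWithRelations(h=h, d_model=d_model, dropout=0.1)
    x = torch.randn(2, seq_len, d_model)
    relation_k = torch.randn(2, seq_len, seq_len, d_model // h)
    relation_v = torch.randn(2, seq_len, seq_len, d_model // h)
    out = mha(x, x, x, relation_k, relation_v, mask=None)
    assert out.shape == (2, seq_len, d_model)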
|
class Encoder(nn.Module):
'Core encoder is a stack of N layers'
def __init__(self, layer, layer_size, N, tie_layers=False):
super(Encoder, self).__init__()
if tie_layers:
self.layer = layer()
self.layers = [self.layer for _ in range(N)]
else:
self.layers = clones(layer, N)
self.norm = nn.LayerNorm(layer_size)
def forward(self, x, relation, mask):
'Pass the input (and mask) through each layer in turn.'
for layer in self.layers:
x = layer(x, relation, mask)
return self.norm(x)
|
class SublayerConnection(nn.Module):
    '''
    A residual connection followed by a layer norm.
    Note for code simplicity the norm is first as opposed to last.
    '''
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = nn.LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
'Apply residual connection to any sublayer with the same size.'
return (x + self.dropout(sublayer(self.norm(x))))
|
class EncoderLayer(nn.Module):
'Encoder is made up of self-attn and feed forward (defined below)'
def __init__(self, size, self_attn, feed_forward, num_relation_kinds, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones((lambda : SublayerConnection(size, dropout)), 2)
self.size = size
self.relation_k_emb = nn.Embedding(num_relation_kinds, self.self_attn.d_k)
self.relation_v_emb = nn.Embedding(num_relation_kinds, self.self_attn.d_k)
def forward(self, x, relation, mask):
'Follow Figure 1 (left) for connections.'
relation_k = self.relation_k_emb(relation)
relation_v = self.relation_v_emb(relation)
x = self.sublayer[0](x, (lambda x: self.self_attn(x, x, x, relation_k, relation_v, mask)))
return self.sublayer[1](x, self.feed_forward)
|
class PositionwiseFeedForward(nn.Module):
'Implements FFN equation.'
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
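# Hypothetical wiring sketch (illustrative, not taken from the original configs):
# builds a small relation-aware Transformer encoder from the classes above. The
# layer sizes, the number of relation kinds, and the relation-id tensor are made
# up; in the real model the relation ids encode question/schema links.
def _example_relation_aware_encoder():
    import torch
    d_model, d_ff, h, num_relation_kinds, N = 256, 1024, 8, 5, 2
    encoder = Encoder(
        lambda: EncoderLayer(
            d_model,
            MultiHeadedAttentionWithRelations(h, d_model, dropout=0.1),
            PositionwiseFeedForward(d_model, d_ff, dropout=0.1),
            num_relation_kinds,
            dropout=0.1),
        d_model,
        N,
        tie_layers=False)
    x = torch.randn(1, 7, d_model)                            # [batch, seq, d_model]
    relation = torch.randint(num_relation_kinds, (1, 7, 7))   # integer relation ids
    out = encoder(x, relation, mask=None)
    assert out.shape == (1, 7, d_model)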
|
@registry.register('lr_scheduler', 'warmup_polynomial')
@attr.s
class WarmupPolynomialLRScheduler():
optimizer = attr.ib()
num_warmup_steps = attr.ib()
start_lr = attr.ib()
end_lr = attr.ib()
decay_steps = attr.ib()
power = attr.ib()
def update_lr(self, current_step):
if (current_step < self.num_warmup_steps):
warmup_frac_done = (current_step / self.num_warmup_steps)
new_lr = (self.start_lr * warmup_frac_done)
else:
new_lr = (((self.start_lr - self.end_lr) * ((1 - ((current_step - self.num_warmup_steps) / self.decay_steps)) ** self.power)) + self.end_lr)
for param_group in self.optimizer.param_groups:
param_group['lr'] = new_lr
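# Worked example (illustrative; hyperparameters are made up): the schedule warms up
# linearly from 0 to start_lr over num_warmup_steps, then decays polynomially toward
# end_lr, i.e. lr(t) = (start_lr - end_lr) * (1 - (t - warmup) / decay_steps) ** power + end_lr.
def _example_warmup_polynomial_schedule():
    import torch
    opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
    sched = WarmupPolynomialLRScheduler(optimizer=opt, num_warmup_steps=100, start_lr=1e-3, end_lr=0.0, decay_steps=900, power=1.0)
    sched.update_lr(50)      # halfway through warmup: lr = 0.5 * start_lr
    assert abs(opt.param_groups[0]['lr'] - 5e-4) < 1e-12
    sched.update_lr(550)     # halfway through decay (power = 1): lr = 0.5 * start_lr again
    assert abs(opt.param_groups[0]['lr'] - 5e-4) < 1e-12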
|
@registry.register('lr_scheduler', 'warmup_cosine')
@attr.s
class WarmupCosineLRScheduler():
optimizer = attr.ib()
num_warmup_steps = attr.ib()
start_lr = attr.ib()
end_lr = attr.ib()
decay_steps = attr.ib()
def update_lr(self, current_step):
if (current_step < self.num_warmup_steps):
warmup_frac_done = (current_step / self.num_warmup_steps)
new_lr = (self.start_lr * warmup_frac_done)
else:
new_lr = ((((self.start_lr - self.end_lr) * 0.5) * (1 + math.cos(((math.pi * (current_step - self.num_warmup_steps)) / self.decay_steps)))) + self.end_lr)
for param_group in self.optimizer.param_groups:
param_group['lr'] = new_lr
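# Brief numeric check (illustrative; hyperparameters are made up): halfway through
# the cosine decay phase cos(pi/2) = 0, so the learning rate sits midway between
# start_lr and end_lr.
def _example_warmup_cosine_schedule():
    import math
    import torch
    opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
    sched = WarmupCosineLRScheduler(optimizer=opt, num_warmup_steps=100, start_lr=1e-3, end_lr=1e-5, decay_steps=1000)
    sched.update_lr(600)     # (600 - 100) / 1000 = 0.5 of the decay phase
    expected = ((1e-3 - 1e-5) * 0.5) * (1 + math.cos(math.pi * 0.5)) + 1e-5
    assert abs(opt.param_groups[0]['lr'] - expected) < 1e-12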
|
@registry.register('lr_scheduler', 'noop')
class NoOpLRScheduler():
def __init__(self, optimizer):
pass
def update_lr(self, current_step):
pass
|
@registry.register('optimizer', 'adamw')
class AdamW(torch.optim.Optimizer):
    '''Implements Adam algorithm.
    It has been proposed in `Adam: A Method for Stochastic Optimization`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ

    **Modified to implement AdamW, see https://arxiv.org/pdf/1711.05101v3.pdf**
    '''
def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
if (not (0.0 <= lr)):
raise ValueError('Invalid learning rate: {}'.format(lr))
if (not (0.0 <= eps)):
raise ValueError('Invalid epsilon value: {}'.format(eps))
if (not (0.0 <= betas[0] < 1.0)):
raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
if (not (0.0 <= betas[1] < 1.0)):
raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
        '''Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        '''
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
(exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
(beta1, beta2) = group['betas']
state['step'] += 1
exp_avg.mul_(beta1).add_((1 - beta1), grad)
exp_avg_sq.mul_(beta2).addcmul_((1 - beta2), grad, grad)
if amsgrad:
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = (1 - (beta1 ** state['step']))
bias_correction2 = (1 - (beta2 ** state['step']))
step_size = ((group['lr'] * math.sqrt(bias_correction2)) / bias_correction1)
update = (exp_avg / denom)
if (group['weight_decay'] != 0):
update += (group['weight_decay'] * p.data)
p.data.add_(((- step_size) * update))
return loss
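# Hypothetical usage sketch (not from the original repo): unlike plain L2
# regularization, AdamW adds the weight-decay term to `update` only after the Adam
# step direction is formed, decoupling decay from the adaptive gradient scaling.
# Assumes a PyTorch version that still accepts the deprecated add_/addcmul_ call
# signatures used in step() above.
def _example_adamw_step():
    import torch
    model = torch.nn.Linear(4, 2)
    opt = AdamW(model.parameters(), lr=1e-3, weight_decay=0.01)
    loss = model(torch.randn(8, 4)).pow(2).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()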
|