class CoreNLP():
def __init__(self):
if (not os.environ.get('CORENLP_HOME')):
os.environ['CORENLP_HOME'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../third_party/stanford-corenlp-full-2018-10-05'))
if (not os.path.exists(os.environ['CORENLP_HOME'])):
raise Exception('Please install Stanford CoreNLP and put it at {}.\n\n Direct URL: http://nlp.stanford.edu/software/stanford-corenlp-full-2018-10-05.zip\n Landing page: https://stanfordnlp.github.io/CoreNLP/'.format(os.environ['CORENLP_HOME']))
self.client = corenlp.CoreNLPClient()
def __del__(self):
self.client.stop()
def annotate(self, text, annotators=None, output_format=None, properties=None):
try:
result = self.client.annotate(text, annotators, output_format, properties)
except (corenlp.client.PermanentlyFailedException, requests.exceptions.ConnectionError) as e:
print('\nWARNING: CoreNLP connection timeout. Recreating the server...', file=sys.stderr)
self.client.stop()
self.client.start()
result = self.client.annotate(text, annotators, output_format, properties)
return result
|
def annotate(text, annotators=None, output_format=None, properties=None):
global _singleton
if (not _singleton):
_singleton = CoreNLP()
return _singleton.annotate(text, annotators, output_format, properties)
|
class Embedder(metaclass=abc.ABCMeta):
@abc.abstractmethod
def tokenize(self, sentence):
'Given a string, return a list of tokens suitable for lookup.'
pass
@abc.abstractmethod
def untokenize(self, tokens):
'Undo tokenize.'
pass
@abc.abstractmethod
def lookup(self, token):
'Given a token, return a vector embedding if token is in vocabulary.\n\n If token is not in the vocabulary, then return None.'
pass
@abc.abstractmethod
def contains(self, token):
pass
@abc.abstractmethod
def to(self, device):
'Transfer the pretrained embeddings to the given device.'
pass
|
@registry.register('word_emb', 'glove')
class GloVe(Embedder):
def __init__(self, kind):
cache = os.path.join(os.environ.get('CACHE_DIR', os.getcwd()), '.vector_cache')
self.glove = torchtext.vocab.GloVe(name=kind, cache=cache)
self.dim = self.glove.dim
self.vectors = self.glove.vectors
@functools.lru_cache(maxsize=1024)
def tokenize(self, text):
ann = corenlp.annotate(text, annotators=['tokenize', 'ssplit'])
return [tok.word.lower() for sent in ann.sentence for tok in sent.token]
def untokenize(self, tokens):
return ' '.join(tokens)
def lookup(self, token):
i = self.glove.stoi.get(token)
if (i is None):
return None
return self.vectors[i]
def contains(self, token):
return (token in self.glove.stoi)
def to(self, device):
self.vectors = self.vectors.to(device)
|
@registry.register('word_emb', 'bpemb')
class BPEmb(Embedder):
def __init__(self, dim, vocab_size, lang='en'):
self.bpemb = bpemb.BPEmb(lang=lang, dim=dim, vs=vocab_size)
self.dim = dim
self.vectors = torch.from_numpy(self.bpemb.vectors)
def tokenize(self, text):
return self.bpemb.encode(text)
def untokenize(self, tokens):
return self.bpemb.decode(tokens)
def lookup(self, token):
i = self.bpemb.spm.PieceToId(token)
if (i == self.bpemb.spm.unk_id()):
return None
return self.vectors[i]
def contains(self, token):
return (self.lookup(token) is not None)
def to(self, device):
self.vectors = self.vectors.to(device)
|
def argsort(items, key=(lambda x: x), reverse=False):
(orig_to_sort, sorted_items) = zip(*sorted(enumerate(items), key=(lambda x: key(x[1])), reverse=reverse))
sort_to_orig = tuple((x[0] for x in sorted(enumerate(orig_to_sort), key=operator.itemgetter(1))))
return (sorted_items, sort_to_orig, orig_to_sort)
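A small illustrative example of the two permutations returned by argsort (the values are arbitrary): orig_to_sort maps sorted positions back to original positions, and sort_to_orig is its inverse.

items = [3, 1, 2]
sorted_items, sort_to_orig, orig_to_sort = argsort(items)
# sorted_items == (1, 2, 3)
# orig_to_sort == (1, 2, 0): original position of each sorted item
# sort_to_orig == (2, 0, 1): sorted position of each original item
assert tuple(items[j] for j in orig_to_sort) == sorted_items
assert tuple(sorted_items[j] for j in sort_to_orig) == tuple(items)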
|
def sort_lists_by_length(lists):
return argsort(lists, key=len, reverse=True)
|
def batch_bounds_for_packing(lengths):
    'Returns how many items in the batch still have length > i at step i (0-indexed).\n Examples:\n [5] -> [1, 1, 1, 1, 1]\n [5, 5] -> [2, 2, 2, 2, 2]\n [5, 3] -> [2, 2, 2, 1, 1]\n [5, 4, 1, 1] -> [4, 2, 2, 2, 1]\n '
last_length = 0
count = len(lengths)
result = []
for (i, (length, group)) in enumerate(itertools.groupby(reversed(lengths))):
if ((i > 0) and (length <= last_length)):
raise ValueError('lengths must be decreasing and positive')
result.extend(([count] * (length - last_length)))
count -= sum((1 for _ in group))
last_length = length
return result
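A short illustrative check of the docstring example; note that lengths must already be sorted in descending order, and that the result is exactly the batch_sizes layout a PackedSequence uses.

lengths = [5, 4, 1, 1]
bounds = batch_bounds_for_packing(lengths)
# bounds[t] is the number of sequences still active at time step t
assert bounds == [4, 2, 2, 2, 1]
assert sum(bounds) == sum(lengths)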
|
def _make_packed_sequence(data, batch_sizes):
return torch.nn.utils.rnn.PackedSequence(data, torch.LongTensor(batch_sizes).to(data.device))
|
@attr.s(frozen=True)
class PackedSequencePlus():
ps = attr.ib()
lengths = attr.ib()
sort_to_orig = attr.ib(converter=np.array)
orig_to_sort = attr.ib(converter=np.array)
@lengths.validator
def descending(self, attribute, value):
for (x, y) in zip(value, value[1:]):
if (not (x >= y)):
raise ValueError('Lengths are not descending: {}'.format(value))
def __attrs_post_init__(self):
self.__dict__['cum_batch_sizes'] = np.cumsum(([0] + self.ps.batch_sizes[:(- 1)].tolist())).astype(np.int_)
def apply(self, fn):
return attr.evolve(self, ps=torch.nn.utils.rnn.PackedSequence(fn(self.ps.data), self.ps.batch_sizes))
def with_new_ps(self, ps):
return attr.evolve(self, ps=ps)
def pad(self, batch_first, others_to_unsort=(), padding_value=0.0):
(padded, seq_lengths) = torch.nn.utils.rnn.pad_packed_sequence(self.ps, batch_first=batch_first, padding_value=padding_value)
results = (padded[self.sort_to_orig], [seq_lengths[i] for i in self.sort_to_orig])
return (results + tuple((t[self.sort_to_orig] for t in others_to_unsort)))
def cuda(self):
if self.ps.data.is_cuda:
return self
return self.apply((lambda d: d.cuda()))
def raw_index(self, orig_batch_idx, seq_idx):
result = (np.take(self.cum_batch_sizes, seq_idx) + np.take(self.sort_to_orig, orig_batch_idx))
if (self.ps.data is not None):
assert np.all((result < len(self.ps.data)))
return result
def select(self, orig_batch_idx, seq_idx=None):
if (seq_idx is None):
return self.ps.data[self.raw_index(orig_batch_idx, range(self.lengths[self.sort_to_orig[orig_batch_idx]]))]
return self.ps.data[self.raw_index(orig_batch_idx, seq_idx)]
def select_subseq(self, orig_batch_indices):
lengths = [self.lengths[self.sort_to_orig[i]] for i in orig_batch_indices]
return self.from_gather(lengths=lengths, map_index=self.raw_index, gather_from_indices=(lambda indices: self.ps.data[torch.LongTensor(indices)]))
def orig_index(self, raw_idx):
seq_idx = (np.searchsorted(self.cum_batch_sizes, raw_idx, side='right') - 1)
batch_idx = (raw_idx - self.cum_batch_sizes[seq_idx])
orig_batch_idx = self.sort_to_orig[batch_idx]
return (orig_batch_idx, seq_idx)
def orig_batch_indices(self):
result = []
for bs in self.ps.batch_sizes:
result.extend(self.orig_to_sort[:bs])
return np.array(result)
def orig_lengths(self):
for sort_idx in self.sort_to_orig:
(yield self.lengths[sort_idx])
def expand(self, k):
v = self.ps.data
ps_data = v.unsqueeze(1).repeat(1, k, *([1] * (v.dim() - 1))).view((- 1), *v.shape[1:])
batch_sizes = (np.array(self.ps.batch_sizes) * k).tolist()
lengths = np.repeat(self.lengths, k).tolist()
sort_to_orig = [exp_i for i in self.sort_to_orig for exp_i in range((i * k), ((i * k) + k))]
orig_to_sort = [exp_i for i in self.orig_to_sort for exp_i in range((i * k), ((i * k) + k))]
return PackedSequencePlus(_make_packed_sequence(ps_data, batch_sizes), lengths, sort_to_orig, orig_to_sort)
@classmethod
def from_lists(cls, lists, item_shape, tensor_type, item_to_tensor):
result = tensor_type(sum((len(lst) for lst in lists)), *item_shape)
(sorted_lists, sort_to_orig, orig_to_sort) = sort_lists_by_length(lists)
lengths = [len(lst) for lst in sorted_lists]
batch_bounds = batch_bounds_for_packing(lengths)
idx = 0
for (i, bound) in enumerate(batch_bounds):
for (batch_idx, lst) in enumerate(sorted_lists[:bound]):
item_to_tensor(lst[i], batch_idx, result[idx])
idx += 1
result = torch.autograd.Variable(result)
return cls(_make_packed_sequence(result, batch_bounds), lengths, sort_to_orig, orig_to_sort)
@classmethod
def from_gather(cls, lengths, map_index, gather_from_indices):
(sorted_lengths, sort_to_orig, orig_to_sort) = argsort(lengths, reverse=True)
batch_bounds = batch_bounds_for_packing(sorted_lengths)
indices = []
for (seq_idx, bound) in enumerate(batch_bounds):
for batch_idx in orig_to_sort[:bound]:
assert (seq_idx < lengths[batch_idx])
indices.append(map_index(batch_idx, seq_idx))
result = gather_from_indices(indices)
return cls(_make_packed_sequence(result, batch_bounds), sorted_lengths, sort_to_orig, orig_to_sort)
@classmethod
def cat_seqs(cls, items):
batch_size = len(items[0].lengths)
assert all(((len(item.lengths) == batch_size) for item in items[1:]))
        unsorted_concat_lengths = np.zeros(batch_size, dtype=np.int64)
for item in items:
unsorted_concat_lengths += list(item.orig_lengths())
concat_data = torch.cat([item.ps.data for item in items], dim=0)
concat_data_base_indices = np.cumsum(([0] + [item.ps.data.shape[0] for item in items]))
item_map_per_batch_item = []
for batch_idx in range(batch_size):
item_map_per_batch_item.append([(item_idx, item, i) for (item_idx, item) in enumerate(items) for i in range(item.lengths[item.sort_to_orig[batch_idx]])])
def map_index(batch_idx, seq_idx):
(item_idx, item, seq_idx_within_item) = item_map_per_batch_item[batch_idx][seq_idx]
return (concat_data_base_indices[item_idx] + item.raw_index(batch_idx, seq_idx_within_item))
return cls.from_gather(lengths=unsorted_concat_lengths, map_index=map_index, gather_from_indices=(lambda indices: concat_data[torch.LongTensor(indices)]))
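A hedged usage sketch for PackedSequencePlus.from_lists (the lists, the item shape, and the fill-based item_to_tensor callback are illustrative; assumes PyTorch is available):

import torch

lists = [[1.0, 2.0, 3.0], [4.0, 5.0]]
psp = PackedSequencePlus.from_lists(
    lists,
    item_shape=(1,),
    tensor_type=torch.FloatTensor,
    item_to_tensor=lambda item, batch_idx, out: out.fill_(item))

padded, lens = psp.pad(batch_first=True)
# padded has shape (2, 3, 1) with rows in the original (unsorted) order;
# lens lists the per-row lengths, here 3 and 2.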
|
def compute_metrics(config_path, config_args, section, inferred_path, logdir=None, evaluate_beams_individually=False):
if config_args:
config = json.loads(_jsonnet.evaluate_file(config_path, tla_codes={'args': config_args}))
else:
config = json.loads(_jsonnet.evaluate_file(config_path))
if (('model_name' in config) and logdir):
logdir = os.path.join(logdir, config['model_name'])
if logdir:
inferred_path = inferred_path.replace('__LOGDIR__', logdir)
inferred = open(inferred_path)
data = registry.construct('dataset', config['data'][section])
inferred_lines = list(inferred)
if (len(inferred_lines) < len(data)):
raise Exception('Not enough inferred: {} vs {}'.format(len(inferred_lines), len(data)))
if evaluate_beams_individually:
return (logdir, evaluate_all_beams(data, inferred_lines))
else:
return (logdir, evaluate_default(data, inferred_lines))
|
def load_from_lines(inferred_lines):
for line in inferred_lines:
infer_results = json.loads(line)
if infer_results.get('beams', ()):
inferred_code = infer_results['beams'][0]['inferred_code']
else:
inferred_code = None
(yield (inferred_code, infer_results))
|
def evaluate_default(data, inferred_lines):
metrics = data.Metrics(data)
for (inferred_code, infer_results) in load_from_lines(inferred_lines):
if ('index' in infer_results):
metrics.add(data[infer_results['index']], inferred_code)
else:
metrics.add(None, inferred_code, obsolete_gold_code=infer_results['gold_code'])
return metrics.finalize()
|
def evaluate_all_beams(data, inferred_lines):
metrics = data.Metrics(data)
results = []
for (_, infer_results) in load_from_lines(inferred_lines):
for_beam = metrics.evaluate_all(infer_results['index'], data[infer_results['index']], [beam['inferred_code'] for beam in infer_results.get('beams', ())])
results.append(for_beam)
return results
|
def read_index(filename):
index = []
with open(filename) as index_file:
while True:
offset = index_file.read(8)
if (not offset):
break
(offset,) = struct.unpack('<Q', offset)
index.append(offset)
return index
|
class IndexedFileWriter(object):
def __init__(self, path):
self.f = open(path, 'wb')
self.index_f = open((path + '.index'), 'wb')
def append(self, record):
offset = self.f.tell()
self.f.write(record)
self.index_f.write(struct.pack('<Q', offset))
def close(self):
self.f.close()
self.index_f.close()
|
class IndexedFileReader(object):
def __init__(self, path):
self.f = open(path, 'rb')
self.index = read_index((path + '.index'))
        self.lengths = [(end - start) for (start, end) in zip(self.index, (self.index[1:] + [os.path.getsize(path)]))]
def __len__(self):
return len(self.index)
    def __getitem__(self, idx):
        if (not isinstance(idx, int)):
            raise TypeError('index must be integer')
        self.f.seek(self.index[idx])
        return self.f.read(self.lengths[idx])
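A hedged round-trip sketch for the indexed-file pair above (the path is illustrative); each record is appended as raw bytes and its start offset is recorded in the companion .index file.

writer = IndexedFileWriter('/tmp/records.dat')
writer.append(b'first record')
writer.append(b'second record')
writer.close()

reader = IndexedFileReader('/tmp/records.dat')
assert len(reader) == 2
assert reader[0] == b'first record'
assert reader[1] == b'second record'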
|
class Parallelizer(ABC):
    '\n A parallelizer is a general-purpose utility for executing f_i(x_i) over a series of\n functions and inputs, where each f is often quite large and thus expensive to\n transport to a given process, but there are typically fewer distinct values of f,\n allowing each process to use at most one value of f.\n\n If it takes a constant amount of time `t_f` to ship f to a process and a constant\n amount of time `t_x` to ship x to a process then execute f(x) and return the\n result, then if we have `n` xs for a given f, using `k` processes\n takes time `k * t_f + n/k * t_x`, which is minimized when `t_f = n * t_x / k^2`,\n or when `k = sqrt(n * t_x / t_f)`.\n\n If `ratio=None` is passed in, we empirically estimate `ratio = t_f/t_x`\n from measurements of `t_x` and `t_f`, unless there are not many\n examples, in which case a ratio of 1 is used by default.\n '
def __init__(self, max_nproc, ratio=None):
        '\n Create a Parallelizer.\n\n Args\n max_nproc: the maximum number of processes to use\n ratio: the predicted ratio between the time to set up a process\n and the time to execute the function on one input.\n '
assert (isinstance(max_nproc, int) and (max_nproc >= 1))
assert ((ratio is None) or (isinstance(ratio, float) and (ratio > 0)))
self.max_nproc = max_nproc
self.ratio = ratio
@abstractmethod
def start_worker(self, f, input_queue, output_queue):
'\n Create a worker, and transport f to it.\n '
@abstractmethod
def create_queue(self):
'\n Create a queue.Queue that interacts well with the worker threads.\n '
def _map_pooled(self, f, n_workers, xs, pbar):
'\n A generator that yields map(f, xs)\n '
input = self.create_queue()
output = self.create_queue()
for indexed in enumerate(xs):
input.put(indexed)
for _ in range(n_workers):
self.start_worker(f, input, output)
results = ([None] * len(xs))
filled = ([False] * len(xs))
to_yield = 0
for _ in range(len(xs)):
(index, value) = output.get()
results[index] = value
filled[index] = True
pbar.update()
while ((to_yield < len(xs)) and filled[to_yield]):
(yield results[to_yield])
to_yield += 1
def _compute_ratio(self, grouped_args, n_samples=5):
total = sum((len(xs) for (_, xs) in grouped_args))
if (total <= (n_samples * 10)):
return 1.0
(f, (x, *_)) = grouped_args[0]
start = time.time()
input_queue = self.create_queue()
output_queue = self.create_queue()
for idx in range(n_samples):
input_queue.put((idx, x))
self.start_worker(f, input_queue, output_queue)
output_queue.get()
end_1 = time.time()
for _ in range((n_samples - 1)):
output_queue.get()
end_n = time.time()
t_x = ((end_n - end_1) / n_samples)
t_f = ((end_1 - start) - t_x)
return max((t_f / t_x), 1e-10)
def pbar(self, total):
return tqdm.tqdm(total=total, smoothing=0, dynamic_ncols=True)
def parallel_map(self, grouped_args):
        '\n Run each function f on each of its inputs xs, in parallel, and yield the\n results in input order.\n\n Args\n grouped_args: an iterable containing pairs (f, xs).\n f values are "large" in some way and will be transported to each\n worker process only once\n Yields\n (f(x) for f, xs in grouped_args for x in xs)\n '
grouped_args = list(grouped_args)
if (not grouped_args):
return
total = sum((len(xs) for (_, xs) in grouped_args))
if (self.max_nproc == 1):
pbar = self.pbar(total)
for (f, xs) in grouped_args:
for x in xs:
(yield f(x))
pbar.update()
pbar.close()
return
ratio = self.ratio
if (ratio is None):
ratio = self._compute_ratio(grouped_args)
print(('Computed ratio: %.2f' % ratio))
pbar = self.pbar(total)
nworkers = []
generators = []
while grouped_args:
(f, xs) = grouped_args.pop(0)
k = min(max(1, int(((len(xs) / ratio) ** 0.5))), self.max_nproc)
while ((k + sum(nworkers)) > self.max_nproc):
nworkers.pop(0)
(yield from generators.pop(0))
nworkers.append(k)
generators.append(self._map_pooled(f, k, xs, pbar))
for gen in generators:
(yield from gen)
pbar.close()
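A small sketch of the sizing rule from the docstring, with illustrative numbers and a hypothetical helper name: total time k * t_f + (n / k) * t_x is minimized at k = sqrt(n * t_x / t_f), which is what parallel_map computes as sqrt(len(xs) / ratio) before clamping to max_nproc.

import math

def optimal_workers(n_items, t_f, t_x, max_nproc):
    # d/dk [k * t_f + (n / k) * t_x] = t_f - n * t_x / k**2 = 0  =>  k = sqrt(n * t_x / t_f)
    k = math.sqrt(n_items * t_x / t_f)
    return min(max(1, int(k)), max_nproc)

# e.g. shipping f takes 2s, each item takes 0.02s, 10000 items, at most 32 processes:
assert optimal_workers(10000, t_f=2.0, t_x=0.02, max_nproc=32) == 10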
|
class CPUParallelizer(Parallelizer):
def start_worker(self, f, input_queue, output_queue):
worker = multiprocessing.Process(target=self.multi_processing_worker, args=(f, input_queue, output_queue))
worker.start()
def create_queue(self):
return multiprocessing.Queue()
@staticmethod
def multi_processing_worker(f, input_queue, output_queue):
while True:
try:
(index, x) = input_queue.get(False)
except queue.Empty:
return
output_queue.put((index, f(x)))
|
class RandomState():
def __init__(self):
self.random_mod_state = random.getstate()
self.np_state = np.random.get_state()
self.torch_cpu_state = torch.get_rng_state()
self.torch_gpu_states = [torch.cuda.get_rng_state(d) for d in range(torch.cuda.device_count())]
def restore(self):
random.setstate(self.random_mod_state)
np.random.set_state(self.np_state)
torch.set_rng_state(self.torch_cpu_state)
for (d, state) in enumerate(self.torch_gpu_states):
torch.cuda.set_rng_state(state, d)
|
class RandomContext():
'Save and restore state of PyTorch, NumPy, Python RNGs.'
def __init__(self, seed=None):
outside_state = RandomState()
random.seed(seed)
np.random.seed(seed)
if (seed is None):
torch.manual_seed(random.randint(((- sys.maxsize) - 1), sys.maxsize))
else:
torch.manual_seed(seed)
self.inside_state = RandomState()
outside_state.restore()
self._active = False
def __enter__(self):
if self._active:
raise Exception('RandomContext can be active only once')
self.outside_state = RandomState()
self.inside_state.restore()
self._active = True
def __exit__(self, exception_type, exception_value, traceback):
self.inside_state = RandomState()
self.outside_state.restore()
self.outside_state = None
self._active = False
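A hedged usage sketch: draws made inside the context come from the context's own seeded stream, and the RNG state outside the with block is left exactly as it was.

import random
import torch

ctx = RandomContext(seed=123)

random.seed(0)
with ctx:
    sample = torch.rand(1)              # drawn from the context's seeded stream
outside_draw = random.random()

random.seed(0)
assert outside_draw == random.random()  # the outside RNG stream was not consumed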
|
def register(kind, name):
kind_registry = _REGISTRY[kind]
def decorator(obj):
if (name in kind_registry):
raise LookupError('{} already registered as kind {}'.format(name, kind))
kind_registry[name] = obj
return obj
return decorator
|
def lookup(kind, name):
if isinstance(name, collections.abc.Mapping):
name = name['name']
if (kind not in _REGISTRY):
raise KeyError('Nothing registered under "{}"'.format(kind))
return _REGISTRY[kind][name]
|
def construct(kind, config, unused_keys=(), **kwargs):
return instantiate(lookup(kind, config), config, (unused_keys + ('name',)), **kwargs)
|
def instantiate(callable, config, unused_keys=(), **kwargs):
merged = {**config, **kwargs}
signature = inspect.signature(callable)
for (name, param) in signature.parameters.items():
if (param.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.VAR_POSITIONAL)):
raise ValueError('Unsupported kind for param {}: {}'.format(name, param.kind))
if any(((param.kind == inspect.Parameter.VAR_KEYWORD) for param in signature.parameters.values())):
return callable(**merged)
missing = {}
for key in list(merged.keys()):
if (key not in signature.parameters):
if (key not in unused_keys):
missing[key] = merged[key]
merged.pop(key)
if missing:
print('WARNING {}: superfluous {}'.format(callable, missing), file=sys.stderr)
return callable(**merged)
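A hedged sketch of how the registry functions fit together (it assumes _REGISTRY is a collections.defaultdict(dict) defined in this module; the 'optimizer'/'sgd' names and the SGDConfig class are illustrative):

@register('optimizer', 'sgd')
class SGDConfig:
    def __init__(self, lr, momentum=0.0):
        self.lr = lr
        self.momentum = momentum

# 'name' selects the registered class; the remaining config keys become constructor kwargs.
opt = construct('optimizer', {'name': 'sgd', 'lr': 0.1})
assert isinstance(opt, SGDConfig) and opt.lr == 0.1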
|
class ArgsDict(dict):
def __init__(self, **kwargs):
super(ArgsDict, self).__init__()
for (key, value) in kwargs.items():
self[key] = value
self.__dict__ = self
|
def load_checkpoint(model, optimizer, model_dir, map_location=None, step=None):
path = os.path.join(model_dir, 'model_checkpoint')
if (step is not None):
path += '-{:08d}'.format(step)
if os.path.exists(path):
print(('Loading model from %s' % path))
checkpoint = torch.load(path, map_location=map_location)
old_state_dict = model.state_dict()
for key in old_state_dict.keys():
if (key not in checkpoint['model']):
checkpoint['model'][key] = old_state_dict[key]
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
return checkpoint.get('step', 0)
return 0
|
def load_and_map_checkpoint(model, model_dir, remap):
path = os.path.join(model_dir, 'model_checkpoint')
print(('Loading parameters %s from %s' % (remap.keys(), model_dir)))
checkpoint = torch.load(path)
new_state_dict = model.state_dict()
for (name, value) in remap.items():
new_state_dict[name] = checkpoint['model'][value]
model.load_state_dict(new_state_dict)
|
def save_checkpoint(model, optimizer, step, model_dir, ignore=[], keep_every_n=10000000):
if (not os.path.exists(model_dir)):
os.makedirs(model_dir)
path_without_step = os.path.join(model_dir, 'model_checkpoint')
step_padded = format(step, '08d')
state_dict = model.state_dict()
if ignore:
        for key in list(state_dict.keys()):
for item in ignore:
if key.startswith(item):
state_dict.pop(key)
path_with_step = '{}-{}'.format(path_without_step, step_padded)
torch.save({'model': state_dict, 'optimizer': optimizer.state_dict(), 'step': step}, path_with_step)
if os.path.exists(path_without_step):
os.unlink(path_without_step)
try:
os.symlink(os.path.basename(path_with_step), path_without_step)
except OSError:
shutil.copy2(path_with_step, path_without_step)
if (keep_every_n is not None):
all_checkpoints = []
for name in os.listdir(model_dir):
m = CHECKPOINT_PATTERN.match(name)
if ((m is None) or (name == os.path.basename(path_with_step))):
continue
checkpoint_step = int(m.group(1))
all_checkpoints.append((checkpoint_step, name))
all_checkpoints.sort()
last_step = float('-inf')
for (checkpoint_step, name) in all_checkpoints:
if ((checkpoint_step - last_step) >= keep_every_n):
last_step = checkpoint_step
continue
os.unlink(os.path.join(model_dir, name))
|
class Saver(object):
'Class to manage save and restore for the model and optimizer.'
def __init__(self, model, optimizer, keep_every_n=None):
self._model = model
self._optimizer = optimizer
self._keep_every_n = keep_every_n
def restore(self, model_dir, map_location=None, step=None):
'Restores model and optimizer from given directory.\n\n Returns:\n Last training step for the model restored.\n '
last_step = load_checkpoint(self._model, self._optimizer, model_dir, map_location, step)
return last_step
def save(self, model_dir, step):
'Saves model and optimizer to given directory.\n\n Args:\n model_dir: Model directory to save.\n step: Current training step.\n '
save_checkpoint(self._model, self._optimizer, step, model_dir, keep_every_n=self._keep_every_n)
def restore_part(self, other_model_dir, remap):
"Restores part of the model from other directory.\n\n Useful to initialize part of the model with another pretrained model.\n\n Args:\n other_model_dir: Model directory to load from.\n remap: dict, remapping current parameters to the other model's.\n "
load_and_map_checkpoint(self._model, other_model_dir, remap)
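A hedged usage sketch for Saver (the model, optimizer, and directory are illustrative placeholders):

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
saver = Saver(model, optimizer)

saver.save('/tmp/run1', step=100)        # writes model_checkpoint-00000100 plus a 'model_checkpoint' link
last_step = saver.restore('/tmp/run1')   # reloads model and optimizer state
assert last_step == 100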
|
def to_dict_with_sorted_values(d, key=None):
return {k: sorted(v, key=key) for (k, v) in d.items()}
|
def to_dict_with_set_values(d):
result = {}
for (k, v) in d.items():
hashable_v = []
for v_elem in v:
if isinstance(v_elem, list):
hashable_v.append(tuple(v_elem))
else:
hashable_v.append(v_elem)
result[k] = set(hashable_v)
return result
|
def tuplify(x):
if (not isinstance(x, (tuple, list))):
return x
return tuple((tuplify(elem) for elem in x))
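Small illustrative examples of the container helpers above (assuming they live in the same module):

assert tuplify([1, [2, 3], (4,)]) == (1, (2, 3), (4,))
assert to_dict_with_sorted_values({'a': [3, 1, 2]}) == {'a': [1, 2, 3]}
assert to_dict_with_set_values({'a': [[1, 2], [1, 2], 3]}) == {'a': {(1, 2), 3}}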
|
def test_argsort_properties():
for items in (tuple(range(10)), (9, 8, 7, 6, 5, 4, 3, 2, 1, 0), (0, 2, 4, 6, 8, 1, 3, 5, 7, 9), (0, 9, 1, 8, 2, 7, 3, 6, 4, 5)):
(sorted_items, sort_to_orig, orig_to_sort) = batched_sequence.argsort(items)
for i in range(len(items)):
assert (items[orig_to_sort[i]] == sorted_items[i])
assert (tuple((items[i] for i in orig_to_sort)) == sorted_items)
for i in range(len(items)):
assert (sorted_items[sort_to_orig[i]] == items[i])
assert (tuple((sorted_items[i] for i in sort_to_orig)) == items)
|
def func_1(x, y):
time.sleep((random.random() * 0.1))
return ((x * 10) + y)
|
def func_2(x, y):
return x(y)
|
def test_parallelizer():
p = parallelizer.CPUParallelizer(4)
    results = list(p.parallel_map([(functools.partial(func_1, 2), [3, 4, 5]), (functools.partial(func_1, 3), [4, 5, 6]), (functools.partial(func_1, 4), [])]))
assert (results == [23, 24, 25, 34, 35, 36]), str(results)
p = parallelizer.CPUParallelizer(1)
from_parent = 1
    results = list(p.parallel_map([(functools.partial(func_2, (lambda x: (x + from_parent))), [3, 4, 5])]))
assert (results == [4, 5, 6]), str(results)
|
class Sentinel(object):
'Used to represent special values like UNK.'
__slots__ = ('name',)
def __init__(self, name):
self.name = name
def __repr__(self):
return (('<' + self.name) + '>')
def __lt__(self, other):
        if isinstance(other, Sentinel):
return (self.name < other.name)
return True
|
class Vocab(collections.abc.Set):
def __init__(self, iterable, special_elems=(UNK, BOS, EOS)):
elements = list(special_elems)
elements.extend(iterable)
assert (len(elements) == len(set(elements)))
self.id_to_elem = {i: elem for (i, elem) in enumerate(elements)}
self.elem_to_id = {elem: i for (i, elem) in enumerate(elements)}
def __iter__(self):
        for i in range(len(self)):
(yield self.id_to_elem[i])
def __contains__(self, value):
return (value in self.elem_to_id)
def __len__(self):
return len(self.elem_to_id)
def __getitem__(self, key):
if isinstance(key, slice):
raise TypeError('Slices not supported.')
return self.id_to_elem[key]
def index(self, value):
try:
return self.elem_to_id[value]
except KeyError:
return self.elem_to_id[UNK]
def indices(self, values):
return [self.index(value) for value in values]
def __hash__(self):
return id(self)
    @classmethod
    def load(cls, in_path):
        return cls(json.load(open(in_path)), special_elems=())
def save(self, out_path):
with open(out_path, 'w') as f:
json.dump([self.id_to_elem[i] for i in range(len(self.id_to_elem))], f)
|
class VocabBuilder():
def __init__(self, min_freq=None, max_count=None):
self.word_freq = collections.Counter()
self.min_freq = min_freq
self.max_count = max_count
def add_word(self, word, count=1):
self.word_freq[word] += count
def finish(self, *args, **kwargs):
eligible_words_and_freqs = self.word_freq.most_common(self.max_count)
if (self.min_freq is not None):
for (i, (word, freq)) in enumerate(eligible_words_and_freqs):
if (freq < self.min_freq):
eligible_words_and_freqs = eligible_words_and_freqs[:i]
break
return Vocab((word for (word, freq) in sorted(eligible_words_and_freqs)), *args, **kwargs)
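A hedged usage sketch (it assumes the UNK/BOS/EOS sentinels referenced above are defined in this module; the counts and thresholds are illustrative):

builder = VocabBuilder(min_freq=2)
for word in ['the', 'cat', 'the', 'dog', 'the', 'cat']:
    builder.add_word(word)
vocab = builder.finish()                       # keeps 'the' (3) and 'cat' (2); drops 'dog' (1)

assert 'the' in vocab and 'dog' not in vocab
assert vocab.index('dog') == vocab.index(UNK)  # out-of-vocabulary words fall back to UNK
ids = vocab.indices(['the', 'cat', 'dog'])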
|
def clean_sql_file(input_sql_file, output_sql_file):
print(input_sql_file)
f = open(input_sql_file)
f_out = open(output_sql_file, 'w')
cnt = 0
for line in f.readlines():
(sql, db) = line.split('\t')
if (not sql.endswith(';')):
sql = (sql + ';')
f_out.write((sql + '\n'))
cnt += 1
f.close()
f_out.close()
print(output_sql_file)
print(cnt)
print()
|
class Grammar(object):
def __init__(self, rules):
'\n instantiate a grammar with a set of production rules of type Rule\n '
self.rules = rules
self.rule_index = defaultdict(list)
self.rule_to_id = OrderedDict()
node_types = set()
lhs_nodes = set()
rhs_nodes = set()
for rule in self.rules:
self.rule_index[rule.parent].append(rule)
for node in rule.nodes:
node_types.add(typename(node.type))
lhs_nodes.add(rule.parent)
for child in rule.children:
rhs_nodes.add(child.as_type_node)
root_node = (lhs_nodes - rhs_nodes)
assert (len(root_node) == 1)
self.root_node = next(iter(root_node))
self.terminal_nodes = (rhs_nodes - lhs_nodes)
self.terminal_types = set([n.type for n in self.terminal_nodes])
self.node_type_to_id = OrderedDict()
for (i, type) in enumerate(node_types, start=0):
self.node_type_to_id[type] = i
for (gid, rule) in enumerate(rules, start=0):
self.rule_to_id[rule] = gid
        self.id_to_rule = OrderedDict(((v, k) for (k, v) in self.rule_to_id.items()))
logging.info('num. rules: %d', len(self.rules))
logging.info('num. types: %d', len(self.node_type_to_id))
logging.info('root: %s', self.root_node)
logging.info('terminals: %s', ', '.join((repr(n) for n in self.terminal_nodes)))
def __iter__(self):
return self.rules.__iter__()
def __len__(self):
return len(self.rules)
def __getitem__(self, lhs):
key_node = ASTNode(lhs.type, None)
if (key_node in self.rule_index):
return self.rule_index[key_node]
else:
            raise KeyError(('key=%s' % key_node))
def get_node_type_id(self, node):
from astnode import ASTNode
if isinstance(node, ASTNode):
type_repr = typename(node.type)
return self.node_type_to_id[type_repr]
else:
type_repr = typename(node)
return self.node_type_to_id[type_repr]
def is_terminal(self, node):
return (node.type in self.terminal_types)
def is_value_node(self, node):
raise NotImplementedError
|
class IFTTTGrammar(Grammar):
def __init__(self, rules):
super(IFTTTGrammar, self).__init__(rules)
def is_value_node(self, node):
return False
|
def is_builtin_type(x):
return ((x == str) or (x == int) or (x == float) or (x == bool) or (x == object) or (x == 'identifier'))
|
def is_terminal_ast_type(x):
if (inspect.isclass(x) and (x in TERMINAL_AST_TYPES)):
return True
return False
|
def type_str_to_type(type_str):
if (type_str.endswith('*') or (type_str == 'root') or (type_str == 'epsilon')):
return type_str
else:
try:
type_obj = eval(type_str)
if is_builtin_type(type_obj):
return type_obj
except:
pass
try:
type_obj = eval(('ast.' + type_str))
return type_obj
except:
raise RuntimeError(('unidentified type string: %s' % type_str))
|
def is_compositional_leaf(node):
is_leaf = True
for (field_name, field_value) in ast.iter_fields(node):
if (field_name in NODE_FIELD_BLACK_LIST):
continue
if (field_value is None):
is_leaf &= True
elif (isinstance(field_value, list) and (len(field_value) == 0)):
is_leaf &= True
else:
is_leaf &= False
return is_leaf
|
class PythonGrammar(Grammar):
def __init__(self, rules):
super(PythonGrammar, self).__init__(rules)
def is_value_node(self, node):
return is_builtin_type(node.type)
|
def is_builtin_type(x):
return ((x == str) or (x == int) or (x == float) or (x == bool) or (x == object) or (x == 'identifier'))
|
def type_str_to_type(type_str):
return type_str
|
def is_compositional_leaf(node):
is_leaf = True
for (field_name, field_value) in ast.iter_fields(node):
if (field_name in NODE_FIELD_BLACK_LIST):
continue
if (field_value is None):
is_leaf &= True
elif (isinstance(field_value, list) and (len(field_value) == 0)):
is_leaf &= True
else:
is_leaf &= False
return is_leaf
|
class SQLGrammar(Grammar):
def __init__(self, rules):
super(SQLGrammar, self).__init__(rules)
def is_value_node(self, node):
return (node.type in TERMINAL_AST_TYPES)
|
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
|
def variableFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
result = Variable(torch.LongTensor(indexes).view((- 1), 1))
if use_cuda:
return result.cuda()
else:
return result
|
def variablesFromPair(pair):
input_variable = variableFromSentence(input_lang, pair[0])
target_variable = variableFromSentence(output_lang, pair[1])
return (input_variable, target_variable)
|
def train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH, use_attention=True):
encoder_hidden = encoder.initHidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = input_variable.size()[0]
target_length = target_variable.size()[0]
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = (encoder_outputs.cuda() if use_cuda else encoder_outputs)
loss = 0
for ei in range(input_length):
(encoder_output, encoder_hidden) = encoder(input_variable[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0][0]
decoder_input = Variable(torch.LongTensor([[SOS_token]]))
decoder_input = (decoder_input.cuda() if use_cuda else decoder_input)
decoder_hidden = encoder_hidden
use_teacher_forcing = (True if (random.random() < teacher_forcing_ratio) else False)
if use_teacher_forcing:
for di in range(target_length):
if use_attention:
(decoder_output, decoder_hidden, decoder_attention) = decoder(decoder_input, decoder_hidden, encoder_outputs)
else:
(decoder_output, decoder_hidden) = decoder(decoder_input, decoder_hidden)
loss += criterion(decoder_output, target_variable[di])
decoder_input = target_variable[di]
else:
for di in range(target_length):
if use_attention:
(decoder_output, decoder_hidden, decoder_attention) = decoder(decoder_input, decoder_hidden, encoder_outputs)
else:
(decoder_output, decoder_hidden) = decoder(decoder_input, decoder_hidden)
(topv, topi) = decoder_output.data.topk(1)
ni = topi[0][0]
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = (decoder_input.cuda() if use_cuda else decoder_input)
loss += criterion(decoder_output, target_variable[di])
if (ni == EOS_token):
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return (loss.data[0] / target_length)
|
def asMinutes(s):
m = math.floor((s / 60))
s -= (m * 60)
return ('%dm %ds' % (m, s))
|
def timeSince(since, percent):
now = time.time()
s = (now - since)
es = (s / percent)
rs = (es - s)
return ('%s (- %s)' % (asMinutes(s), asMinutes(rs)))
|
def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01, use_attention=True):
start = time.time()
plot_losses = []
print_loss_total = 0
plot_loss_total = 0
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
training_pairs = [variablesFromPair(random.choice(train_pairs)) for i in range(n_iters)]
criterion = nn.NLLLoss()
for iter in range(1, (n_iters + 1)):
training_pair = training_pairs[(iter - 1)]
input_variable = training_pair[0]
target_variable = training_pair[1]
loss = train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, use_attention=use_attention)
print_loss_total += loss
plot_loss_total += loss
if ((iter % print_every) == 0):
print_loss_avg = (print_loss_total / print_every)
print_loss_total = 0
print(('%s (%d %d%%) %.4f' % (timeSince(start, (iter / n_iters)), iter, ((iter / n_iters) * 100), print_loss_avg)))
if ((iter % plot_every) == 0):
plot_loss_avg = (plot_loss_total / plot_every)
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
|
def showPlot(points):
    (fig, ax) = plt.subplots()
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
|
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH, use_attention=True):
input_variable = variableFromSentence(input_lang, sentence)
input_length = input_variable.size()[0]
encoder_hidden = encoder.initHidden()
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = (encoder_outputs.cuda() if use_cuda else encoder_outputs)
for ei in range(input_length):
(encoder_output, encoder_hidden) = encoder(input_variable[ei], encoder_hidden)
encoder_outputs[ei] = (encoder_outputs[ei] + encoder_output[0][0])
decoder_input = Variable(torch.LongTensor([[SOS_token]]))
decoder_input = (decoder_input.cuda() if use_cuda else decoder_input)
decoder_hidden = encoder_hidden
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
for di in range(max_length):
if use_attention:
(decoder_output, decoder_hidden, decoder_attention) = decoder(decoder_input, decoder_hidden, encoder_outputs)
decoder_attentions[di] = decoder_attention.data
else:
(decoder_output, decoder_hidden) = decoder(decoder_input, decoder_hidden)
(topv, topi) = decoder_output.data.topk(1)
ni = topi[0][0]
if (ni == EOS_token):
decoded_words.append('<EOS>')
break
else:
decoded_words.append(output_lang.index2word[ni])
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = (decoder_input.cuda() if use_cuda else decoder_input)
return (decoded_words, decoder_attentions[:(di + 1)])
|
def evaluateRandomly(encoder, decoder, use_attention=True, n=10):
for i in range(n):
pair = random.choice(train_pairs)
print('>', pair[0])
print('=', pair[1])
(output_words, attentions) = evaluate(encoder, decoder, pair[0], use_attention=use_attention)
output_sentence = ' '.join(output_words)
print('<', output_sentence)
print('')
|
def evaluateAll(encoder, decoder, split='dev', use_attention=True):
if use_attention:
model = 'attention'
else:
model = 'simple'
if (split == 'train'):
eval_pairs = train_pairs
elif (split == 'dev'):
eval_pairs = dev_pairs
translated_sentences = []
target_sentences = []
f_out = open(('%s_%s_out.txt' % (split, model)), 'w')
for i in range(len(eval_pairs)):
pair = eval_pairs[i]
f_out.write((('> ' + pair[0]) + '\n'))
f_out.write((('= ' + pair[1]) + '\n'))
target_sentences.append(pair[1])
(output_words, attentions) = evaluate(encoder, decoder, pair[0], use_attention=use_attention)
output_sentence = ' '.join(output_words)
f_out.write((('< ' + output_sentence) + '\n\n'))
translated_sentences.append(output_sentence)
f_out.close()
bleu_score = get_bleu_score(translated_sentences, target_sentences)
print('BLEU score: ', bleu_score)
|
def extract_grammar(code_file, prefix='py'):
line_num = 0
parse_trees = []
for line in open(code_file):
code = line.strip()
parse_tree = parse(code)
parse_trees.append(parse_tree)
ast_tree = parse_tree_to_python_ast(parse_tree)
ref_ast_tree = ast.parse(canonicalize_code(code)).body[0]
source1 = astor.to_source(ast_tree)
source2 = astor.to_source(ref_ast_tree)
assert (source1 == source2)
line_num += 1
print(('total line of code: %d' % line_num))
grammar = get_grammar(parse_trees)
with open((prefix + '.grammar.txt'), 'w') as f:
for rule in grammar:
str = rule.__repr__()
f.write((str + '\n'))
with open((prefix + '.parse_trees.txt'), 'w') as f:
for tree in parse_trees:
f.write((tree.__repr__() + '\n'))
return (grammar, parse_trees)
|
def rule_vs_node_stat():
line_num = 0
parse_trees = []
code_file = '/Users/yinpengcheng/Research/SemanticParsing/CodeGeneration/card_datasets/hearthstone/all_hs.out'
node_nums = rule_nums = 0.0
for line in open(code_file):
code = line.replace('§', '\n').strip()
parse_tree = parse(code)
node_nums += len(list(parse_tree.nodes))
(rules, _) = parse_tree.get_productions()
rule_nums += len(rules)
parse_trees.append(parse_tree)
line_num += 1
print(('avg. nums of nodes: %f' % (node_nums / line_num)))
print(('avg. nums of rules: %f' % (rule_nums / line_num)))
|
def process_heart_stone_dataset():
data_file = '/Users/yinpengcheng/Research/SemanticParsing/CodeGeneration/card_datasets/hearthstone/all_hs.out'
parse_trees = []
rule_num = 0.0
example_num = 0
for line in open(data_file):
code = line.replace('§', '\n').strip()
parse_tree = parse(code)
pred_ast = parse_tree_to_python_ast(parse_tree)
pred_code = astor.to_source(pred_ast)
ref_ast = ast.parse(code)
ref_code = astor.to_source(ref_ast)
if (pred_code != ref_code):
raise RuntimeError('code mismatch!')
(rules, _) = parse_tree.get_productions(include_value_node=False)
rule_num += len(rules)
example_num += 1
parse_trees.append(parse_tree)
grammar = get_grammar(parse_trees)
with open('hs.grammar.txt', 'w') as f:
for rule in grammar:
str = rule.__repr__()
f.write((str + '\n'))
with open('hs.parse_trees.txt', 'w') as f:
for tree in parse_trees:
f.write((tree.__repr__() + '\n'))
print(('avg. nums of rules: %f' % (rule_num / example_num)))
|
def canonicalize_sql_example(query, sql, ast):
query = re.sub('<.*?>', '', query)
query_tokens = nltk.word_tokenize(query)
parse_tree = parse_raw(ast)
return (query_tokens, sql, parse_tree)
|
def preprocess_sql_dataset(data_file, ast_file):
f = open('sql_dataset.examples.txt', 'w')
ast_data = json.load(open(ast_file, 'r'))
data = json.load(open(data_file))
ast_data = ast_data['statement']
examples = []
for (idx, (item, ast)) in enumerate(zip(data, ast_data)):
nl = item['question'].lower()
sql = ' '.join(item['query_toks_no_value'])
(clean_query_tokens, clean_code, parse_tree) = canonicalize_sql_example(nl, sql, ast)
example = {'id': idx, 'query_tokens': clean_query_tokens, 'code': clean_code, 'parse_tree': parse_tree, 'str_map': None, 'raw_code': sql, 'db_id': item['db_id']}
examples.append(example)
f.write((('*' * 50) + '\n'))
f.write(('example# %d\n' % idx))
        f.write((' '.join(clean_query_tokens) + '\n'))
f.write('\n')
f.write((clean_code + '\n'))
f.write((('*' * 50) + '\n'))
idx += 1
f.close()
print(('preprocess_dataset: cleaned example num: %d' % len(examples)))
return examples
|
def get_terminal_tokens(_terminal_str):
'\n get terminal tokens\n break words like MinionCards into [Minion, Cards]\n '
tmp_terminal_tokens = [t for t in _terminal_str.split(' ') if (len(t) > 0)]
_terminal_tokens = []
for token in tmp_terminal_tokens:
sub_tokens = re.sub('([a-z])([A-Z])', '\\1 \\2', token).split(' ')
_terminal_tokens.extend(sub_tokens)
_terminal_tokens.append(' ')
return _terminal_tokens[:(- 1)]
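An illustrative trace of the splitting behaviour described in the docstring:

assert get_terminal_tokens('MinionCards') == ['Minion', 'Cards']
assert get_terminal_tokens('Stormwind Champion') == ['Stormwind', ' ', 'Champion']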
|
def load_table_schema_data(inputfile):
data = json.load(open(inputfile))
terminal_tokens = []
db_dict = dict()
for db in data:
db_dict[db['db_id']] = db
for col in db['column_names_original']:
terminal_tokens.append(col[1])
for table in db['table_names_original']:
terminal_tokens.append(table)
return (db_dict, set(terminal_tokens))
|
def gen_db_mask(vocab, non_schema_vocab_size, db_file):
db_dict = dict()
vocab_size = vocab.size
data = json.load(open(db_file))
for db in data:
mask = np.zeros(vocab_size, dtype='int32')
mask[:non_schema_vocab_size] = 1
for col in db['column_names_original']:
idx = vocab[col[1]]
mask[idx] = 1
for table in db['table_names_original']:
idx = vocab[table]
mask[idx] = 1
db_dict[db['db_id']] = mask
return db_dict
|
def parse_train_dataset(args):
MAX_QUERY_LENGTH = 70
WORD_FREQ_CUT_OFF = 0
train_data = preprocess_sql_dataset(args.train_data, args.train_data_ast)
dev_data = preprocess_sql_dataset(args.dev_data, args.dev_data_ast)
test_data = preprocess_sql_dataset(args.test_data, args.test_data_ast)
data = ((train_data + dev_data) + test_data)
print('data size: {}'.format(len(data)))
parse_trees = [e['parse_tree'] for e in data]
grammar = get_grammar(parse_trees)
with open('sql.grammar.unary_closure.txt', 'w') as f:
for rule in grammar:
f.write((rule.__repr__() + '\n'))
nl_tokens = list(chain(*[e['query_tokens'] for e in data]))
nl_vocab = gen_vocab(nl_tokens, vocab_size=5000, freq_cutoff=WORD_FREQ_CUT_OFF)
all_terminal_tokens = []
for entry in data:
parse_tree = entry['parse_tree']
for node in parse_tree.get_leaves():
if grammar.is_value_node(node):
terminal_val = node.value
terminal_str = str(terminal_val)
terminal_tokens = get_terminal_tokens(terminal_str)
for terminal_token in terminal_tokens:
assert (len(terminal_token) > 0)
all_terminal_tokens.append(terminal_token)
table_schema = args.table_schema
terminal_vocab = gen_vocab(all_terminal_tokens, vocab_size=5000, freq_cutoff=WORD_FREQ_CUT_OFF)
non_schema_vocab_size = terminal_vocab.size
(db_dict, schema_vocab) = load_table_schema_data(table_schema)
terminal_vocab = gen_schema_vocab(schema_vocab, terminal_vocab)
db_mask = gen_db_mask(terminal_vocab, non_schema_vocab_size, table_schema)
train_data = DataSet(nl_vocab, terminal_vocab, grammar, db_mask, 'sql.train_data')
dev_data = DataSet(nl_vocab, terminal_vocab, grammar, db_mask, 'sql.dev_data')
test_data = DataSet(nl_vocab, terminal_vocab, grammar, db_mask, 'sql.test_data')
all_examples = []
can_fully_reconstructed_examples_num = 0
examples_with_empty_actions_num = 0
for (index, entry) in enumerate(data):
idx = entry['id']
query_tokens = entry['query_tokens']
code = entry['code']
parse_tree = entry['parse_tree']
(rule_list, rule_parents) = parse_tree.get_productions(include_value_node=True)
actions = []
can_fully_reconstructed = True
rule_pos_map = dict()
for (rule_count, rule) in enumerate(rule_list):
if (not grammar.is_value_node(rule.parent)):
assert (rule.value is None), rule.value
parent_rule = rule_parents[(rule_count, rule)][0]
if parent_rule:
parent_t = rule_pos_map[parent_rule]
else:
parent_t = 0
rule_pos_map[rule] = len(actions)
d = {'rule': rule, 'parent_t': parent_t, 'parent_rule': parent_rule}
action = Action(APPLY_RULE, d)
actions.append(action)
else:
assert rule.is_leaf, (rule.type, rule.value, rule.label)
parent_rule = rule_parents[(rule_count, rule)][0]
parent_t = rule_pos_map[parent_rule]
terminal_val = rule.value
terminal_str = str(terminal_val)
terminal_tokens = get_terminal_tokens(terminal_str)
for terminal_token in terminal_tokens:
term_tok_id = terminal_vocab[terminal_token]
tok_src_idx = (- 1)
try:
tok_src_idx = query_tokens.index(terminal_token)
except ValueError:
pass
d = {'literal': terminal_token, 'rule': rule, 'parent_rule': parent_rule, 'parent_t': parent_t}
if ((tok_src_idx < 0) or (tok_src_idx >= MAX_QUERY_LENGTH)):
action = Action(GEN_TOKEN, d)
if (terminal_token not in terminal_vocab):
if (terminal_token not in query_tokens):
can_fully_reconstructed = False
elif (term_tok_id != terminal_vocab.unk):
d['source_idx'] = tok_src_idx
action = Action(GEN_COPY_TOKEN, d)
else:
d['source_idx'] = tok_src_idx
action = Action(COPY_TOKEN, d)
actions.append(action)
d = {'literal': '<eos>', 'rule': rule, 'parent_rule': parent_rule, 'parent_t': parent_t}
actions.append(Action(GEN_TOKEN, d))
if (len(actions) == 0):
examples_with_empty_actions_num += 1
continue
mask = db_mask[entry['db_id']]
example = DataEntry(idx, query_tokens, parse_tree, code, actions, mask, {'str_map': None, 'raw_code': entry['raw_code']})
if can_fully_reconstructed:
can_fully_reconstructed_examples_num += 1
if (0 <= index < args.train_data_size):
train_data.add(example)
elif (index < (args.train_data_size + args.dev_data_size)):
dev_data.add(example)
else:
test_data.add(example)
all_examples.append(example)
max_query_len = max((len(e.query) for e in all_examples))
max_actions_len = max((len(e.actions) for e in all_examples))
logging.info('examples that can be fully reconstructed: %d/%d=%f', can_fully_reconstructed_examples_num, len(all_examples), (can_fully_reconstructed_examples_num / len(all_examples)))
logging.info('empty_actions_count: %d', examples_with_empty_actions_num)
logging.info('max_query_len: %d', max_query_len)
logging.info('max_actions_len: %d', max_actions_len)
train_data.init_data_matrices(max_query_length=70, max_example_action_num=350)
dev_data.init_data_matrices(max_query_length=70, max_example_action_num=350)
test_data.init_data_matrices(max_query_length=70, max_example_action_num=350)
print('train data size:{}'.format(train_data.count))
print('dev data size:{}'.format(dev_data.count))
print('test data size:{}'.format(test_data.count))
serialize_to_file((train_data, dev_data, test_data), args.output_path)
return (train_data, dev_data, test_data)
|
def dump_data_for_evaluation(data_type='django', data_file='', max_query_length=70):
(train_data, dev_data, test_data) = deserialize_from_file(data_file)
prefix = '/Users/yinpengcheng/Projects/dl4mt-tutorial/codegen_data/'
for (dataset, output) in [(train_data, (prefix + ('%s.train' % data_type))), (dev_data, (prefix + ('%s.dev' % data_type))), (test_data, (prefix + ('%s.test' % data_type)))]:
f_source = open((output + '.desc'), 'w')
f_target = open((output + '.code'), 'w')
for e in dataset.examples:
query_tokens = e.query[:max_query_length]
code = e.code
if (data_type == 'django'):
target_code = de_canonicalize_code_for_seq2seq(code, e.meta_data['raw_code'])
else:
target_code = code
target_code = target_code.strip()
tokenized_target = tokenize_code_adv(target_code, breakCamelStr=(False if (data_type == 'django') else True))
tokenized_target = [tk.replace('\n', '#NEWLINE#') for tk in tokenized_target]
tokenized_target = [tk for tk in tokenized_target if (tk is not None)]
while (tokenized_target[(- 1)] == '#INDENT#'):
tokenized_target = tokenized_target[:(- 1)]
f_source.write((' '.join(query_tokens) + '\n'))
f_target.write((' '.join(tokenized_target) + '\n'))
f_source.close()
f_target.close()
|
def typename(x):
    if isinstance(x, str):
return x
return x.__name__
|
def escape(text):
    if (text is None):
        return '-NONE-'
    text = text.replace('"', '-``-').replace("'", '-`-').replace(' ', '-SP-').replace('\t', '-TAB-').replace('\n', '-NL-').replace('\r', '-NL2-').replace('(', '-LRB-').replace(')', '-RRB-').replace('|', '-BAR-')
    if (text == ''):
        return '-EMPTY-'
    return text
|
def unescape(text):
if (text == '-NONE-'):
return None
text = text.replace('-``-', '"').replace('-`-', "'").replace('-SP-', ' ').replace('-TAB-', '\t').replace('-NL-', '\n').replace('-NL2-', '\r').replace('-LRB-', '(').replace('-RRB-', ')').replace('-BAR-', '|').replace('-EMPTY-', '')
return text
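An illustrative round trip through escape/unescape (the input string is arbitrary): bracket, quote, and whitespace characters are rewritten so that tokens survive the parse-tree text format, and unescape reverses the mapping.

s = 'print("a|b")\n'
assert escape(s) == 'print-LRB--``-a-BAR-b-``--RRB--NL-'
assert unescape(escape(s)) == s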
|
def softmax(x):
return T.nnet.softmax(x.reshape(((- 1), x.shape[(- 1)]))).reshape(x.shape)
|
def time_distributed_softmax(x):
import warnings
warnings.warn('time_distributed_softmax is deprecated. Just use softmax!', DeprecationWarning)
return softmax(x)
|
def softplus(x):
return T.nnet.softplus(x)
|
def relu(x):
return T.nnet.relu(x)
|
def tanh(x):
return T.tanh(x)
|
def sigmoid(x):
return T.nnet.sigmoid(x)
|
def hard_sigmoid(x):
return T.nnet.hard_sigmoid(x)
|
def linear(x):
'\n The function returns the variable that is passed in, so all types work\n '
return x
|
def get(identifier):
return get_from_module(identifier, globals(), 'activation function')
|
class Convolution2d(Layer):
'a convolutional layer with max pooling'
def __init__(self, max_sent_len, word_embed_dim, filter_num, filter_window_size, border_mode='valid', activation='relu', name='Convolution2d'):
super(Convolution2d, self).__init__()
self.init = initializations.get('uniform')
self.activation = activations.get(activation)
self.border_mode = border_mode
self.W = self.init((filter_num, 1, filter_window_size, word_embed_dim), scale=0.01, name='W')
self.b = shared_zeros(filter_num, name='b')
self.params = [self.W, self.b]
if (self.border_mode == 'valid'):
self.ds = (((max_sent_len - filter_window_size) + 1), 1)
elif (self.border_mode == 'full'):
self.ds = (((max_sent_len + filter_window_size) - 1), 1)
if (name is not None):
self.set_name(name)
def __call__(self, X):
conv_output = conv.conv2d(X.reshape((X.shape[0], 1, X.shape[1], X.shape[2])), filters=self.W, filter_shape=self.W.shape.eval(), border_mode=self.border_mode)
output = self.activation((conv_output + self.b.dimshuffle(('x', 0, 'x', 'x'))))
output = pool.pool_2d(output, ds=self.ds, ignore_border=True, mode='max')
output = output.flatten(2)
return output
|
class Layer(object):
def __init__(self):
self.params = []
def init_updates(self):
self.updates = []
def __call__(self, X):
return X
def supports_masked_input(self):
' Whether or not this layer respects the output mask of its previous layer in its calculations. If you try\n to attach a layer that does *not* support masked_input to a layer that gives a non-None output_mask() that is\n an error'
return False
def get_output_mask(self, train=None):
'\n For some models (such as RNNs) you want a way of being able to mark some output data-points as\n "masked", so they are not used in future calculations. In such a model, get_output_mask() should return a mask\n of one less dimension than get_output() (so if get_output is (nb_samples, nb_timesteps, nb_dimensions), then the mask\n is (nb_samples, nb_timesteps), with a one for every unmasked datapoint, and a zero for every masked one.\n\n If there is *no* masking then it shall return None. For instance if you attach an Activation layer (they support masking)\n to a layer with an output_mask, then that Activation shall also have an output_mask. If you attach it to a layer with no\n such mask, then the Activation\'s get_output_mask shall return None.\n\n Some layers have an output_mask even if their input is unmasked, notably Embedding which can turn the entry "0" into\n a mask.\n '
return None
def set_weights(self, weights):
for (p, w) in zip(self.params, weights):
if (p.eval().shape != w.shape):
raise Exception(('Layer shape %s not compatible with weight shape %s.' % (p.eval().shape, w.shape)))
p.set_value(floatX(w))
def get_weights(self):
weights = []
for p in self.params:
weights.append(p.get_value())
return weights
def get_params(self):
return self.params
def set_name(self, name):
if name:
for i in range(len(self.params)):
if (self.params[i].name is None):
self.params[i].name = ('%s_p%d' % (name, i))
else:
self.params[i].name = ((name + '_') + self.params[i].name)
self.name = name
|
class MaskedLayer(Layer):
'\n If your layer trivially supports masking (by simply copying the input mask to the output), then subclass MaskedLayer\n instead of Layer, and make sure that you incorporate the input mask into your calculation of get_output()\n '
def supports_masked_input(self):
return True
|
class Dense(Layer):
def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='tanh', name='Dense'):
super(Dense, self).__init__()
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.input_dim = input_dim
self.output_dim = output_dim
self.input = T.matrix()
self.W = self.init((self.input_dim, self.output_dim))
self.b = shared_zeros(self.output_dim)
self.params = [self.W, self.b]
if (name is not None):
self.set_name(name)
def set_name(self, name):
self.W.name = ('%s_W' % name)
self.b.name = ('%s_b' % name)
def __call__(self, X):
output = self.activation((T.dot(X, self.W) + self.b))
return output
|
class Dropout(Layer):
def __init__(self, p, srng, name='dropout'):
super(Dropout, self).__init__()
assert (0.0 < p < 1.0)
self.p = p
self.srng = srng
if (name is not None):
self.set_name(name)
def __call__(self, X, train_only=True):
retain_prob = (1.0 - self.p)
X_train = (X * self.srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX))
X_test = (X * retain_prob)
if train_only:
return X_train
else:
return (X_train, X_test)
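A NumPy sketch of the train/test behaviour above (this is non-inverted dropout: units are zeroed at training time and activations are scaled by the keep probability at test time; the array values are illustrative):

import numpy as np

p = 0.5                                                    # drop probability
retain_prob = 1.0 - p
X = np.ones((2, 4), dtype='float32')

rng = np.random.RandomState(0)
X_train = X * rng.binomial(1, retain_prob, size=X.shape)   # random units zeroed during training
X_test = X * retain_prob                                   # scaled at test time so expectations match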
|
class WordDropout(Layer):
def __init__(self, p, srng, name='WordDropout'):
super(WordDropout, self).__init__()
self.p = p
self.srng = srng
def __call__(self, X, train_only=True):
retain_prob = (1.0 - self.p)
mask = self.srng.binomial(X.shape[:(- 1)], p=retain_prob, dtype=theano.config.floatX)
X_train = (X * T.shape_padright(mask))
if train_only:
return X_train
else:
return (X_train, X)
|
def get_embed_iter(file_path):
for line in open(file_path):
line = line.strip()
data = line.split(' ')
word = data[0]
embed = np.asarray([float(e) for e in data[1:]], dtype='float32')
(yield (word, embed))
|
class Embedding(Layer):
    '\n Turn positive integers (indexes) into dense vectors of fixed size.\n eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]\n\n @input_dim: size of vocabulary (highest input integer + 1)\n @output_dim: size of dense representation\n '
def __init__(self, input_dim, output_dim, init='uniform', name=None):
super(Embedding, self).__init__()
self.init = initializations.get(init)
self.input_dim = input_dim
self.output_dim = output_dim
self.W = self.init((self.input_dim, self.output_dim), scale=0.1)
self.params = [self.W]
if (name is not None):
self.set_name(name)
def get_output_mask(self, X):
return (T.ones_like(X) * (1 - T.eq(X, 0))).astype('int8')
def init_pretrained(self, file_path, vocab):
W = self.W.get_value(borrow=True)
inited_words = set()
for (word, embed) in get_embed_iter(file_path):
if (word in vocab):
idx = vocab[word]
W[idx] = embed
inited_words.add(word)
return inited_words
def __call__(self, X, mask_zero=False):
out = self.W[X]
if mask_zero:
return (out, self.get_output_mask(X))
else:
return out
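A NumPy sketch of what the lookup and the zero mask compute (shapes and values are illustrative; the layer above does the same thing symbolically in Theano):

import numpy as np

W = np.array([[0.0, 0.0],       # row 0 is reserved for padding
              [0.25, 0.1],
              [0.6, -0.2]], dtype='float32')
X = np.array([[1, 2, 0]])       # a batch of one sequence, padded with index 0

out = W[X]                                                # shape (1, 3, 2): one vector per index
mask = (np.ones_like(X) * (1 - (X == 0))).astype('int8')
# mask == [[1, 1, 0]]: padded positions can be ignored downstream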
|
class HybridEmbedding(Layer):
    '\n Turn positive integers (indexes) into dense vectors of fixed size.\n eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]\n\n @embed_size: size of vocabulary (highest input integer + 1)\n @embed_dim: size of dense representation\n '
def __init__(self, embed_size, unfixed_embed_size, embed_dim, init='uniform', name='HybridEmbedding'):
super(HybridEmbedding, self).__init__()
self.init = initializations.get(init)
self.unfixed_embed_size = unfixed_embed_size
self.W_unfixed = self.init((embed_size, embed_dim))
self.W_fixed = self.init((embed_size, embed_dim))
        self.W_fixed.name = 'HybridEmbedding_fixed_embed_matrix'
self.params = [self.W_unfixed]
if (name is not None):
self.set_name(name)
def get_output_mask(self, X):
return (T.ones_like(X) * (1 - T.eq(X, 0)))
def __call__(self, X, mask_zero=False):
cond = T.lt(X, self.unfixed_embed_size)
out = T.switch(T.shape_padright(cond), self.W_unfixed[X], self.W_fixed[X])
if mask_zero:
return (out, self.get_output_mask(X))
else:
return out
|
def mean_squared_error(y_true, y_pred):
return T.sqr((y_pred - y_true)).mean(axis=(- 1))
|
def mean_absolute_error(y_true, y_pred):
return T.abs_((y_pred - y_true)).mean(axis=(- 1))
|
def mean_absolute_percentage_error(y_true, y_pred):
return (T.abs_(((y_true - y_pred) / T.clip(T.abs_(y_true), epsilon, np.inf))).mean(axis=(- 1)) * 100.0)
|
def mean_squared_logarithmic_error(y_true, y_pred):
return T.sqr((T.log((T.clip(y_pred, epsilon, np.inf) + 1.0)) - T.log((T.clip(y_true, epsilon, np.inf) + 1.0)))).mean(axis=(- 1))
|
def squared_hinge(y_true, y_pred):
return T.sqr(T.maximum((1.0 - (y_true * y_pred)), 0.0)).mean(axis=(- 1))
|
def hinge(y_true, y_pred):
return T.maximum((1.0 - (y_true * y_pred)), 0.0).mean(axis=(- 1))
|