code (string, lengths 17 to 6.64M)
|
---|
def categorical_crossentropy(y_true, y_pred):
'Expects a binary class matrix instead of a vector of scalar classes\n '
y_pred = T.clip(y_pred, epsilon, (1.0 - epsilon))
y_pred /= y_pred.sum(axis=(- 1), keepdims=True)
cce = T.nnet.categorical_crossentropy(y_pred, y_true)
return cce
|
def binary_crossentropy(y_true, y_pred):
y_pred = T.clip(y_pred, epsilon, (1.0 - epsilon))
bce = T.nnet.binary_crossentropy(y_pred, y_true).mean(axis=(- 1))
return bce
|
def poisson_loss(y_true, y_pred):
return T.mean((y_pred - (y_true * T.log((y_pred + epsilon)))), axis=(- 1))
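# Usage sketch: compiling one of the Theano objectives above into a callable.
# Assumes theano/numpy are available and that `epsilon` is the module-level
# constant these losses reference (its value here is an assumption).
import numpy as np
import theano
import theano.tensor as T

epsilon = 1e-7  # assumed value of the module-level constant used by the losses above

y_true = T.matrix('y_true')  # one-hot targets
y_pred = T.matrix('y_pred')  # predicted class probabilities
loss = categorical_crossentropy(y_true, y_pred).mean()
loss_fn = theano.function([y_true, y_pred], loss)

targets = np.eye(3)[[0, 2]]                            # two one-hot rows
preds = np.array([[0.8, 0.1, 0.1], [0.2, 0.2, 0.6]])
print(loss_fn(targets, preds))                         # mean cross-entropy of the batch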
|
def get(identifier):
return get_from_module(identifier, globals(), 'objective')
|
def clip_norm(g, c, n):
if (c > 0):
g = T.switch(T.ge(n, c), ((g * c) / n), g)
return g
|
def kl_divergence(p, p_hat):
return ((p_hat - p) + (p * T.log((p / p_hat))))
|
class Optimizer(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
self.updates = []
def get_state(self):
return [u[0].get_value() for u in self.updates]
def set_state(self, value_list):
assert (len(self.updates) == len(value_list))
for (u, v) in zip(self.updates, value_list):
u[0].set_value(floatX(v))
def get_updates(self, params, constraints, loss, **kwargs):
raise NotImplementedError
def get_gradients(self, loss, params, **kwargs):
grads = T.grad(loss, params, disconnected_inputs='warn', **kwargs)
if (hasattr(self, 'clip_grad') and (self.clip_grad > 0)):
norm = T.sqrt(sum([T.sum((g ** 2)) for g in grads]))
grads = [clip_norm(g, self.clip_grad, norm) for g in grads]
return grads
def get_config(self):
return {'name': self.__class__.__name__}
|
class SGD(Optimizer):
def __init__(self, lr=0.01, momentum=0.0, decay=0.0, nesterov=False, *args, **kwargs):
super(SGD, self).__init__(**kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
self.lr = shared_scalar(lr)
self.momentum = shared_scalar(momentum)
def get_updates(self, params, loss):
grads = self.get_gradients(loss, params)
lr = (self.lr * (1.0 / (1.0 + (self.decay * self.iterations))))
self.updates = [(self.iterations, (self.iterations + 1.0))]
for (p, g) in zip(params, grads):
m = shared_zeros(p.get_value().shape)
v = ((self.momentum * m) - (lr * g))
self.updates.append((m, v))
if self.nesterov:
new_p = ((p + (self.momentum * v)) - (lr * g))
else:
new_p = (p + v)
self.updates.append((p, new_p))
return self.updates
def get_config(self):
return {'name': self.__class__.__name__, 'lr': float(self.lr.get_value()), 'momentum': float(self.momentum.get_value()), 'decay': float(self.decay.get_value()), 'nesterov': self.nesterov}
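# Usage sketch: wiring an optimizer such as the SGD above into a Theano training
# step. The softmax model below is made up for illustration; shared_scalar and
# shared_zeros are assumed to be this module's own helpers.
import numpy as np
import theano
import theano.tensor as T

W = theano.shared(np.zeros((5, 3), dtype=theano.config.floatX), name='W')
x = T.matrix('x')
y = T.ivector('y')  # integer class labels
loss = T.nnet.categorical_crossentropy(T.nnet.softmax(T.dot(x, W)), y).mean()

sgd = SGD(lr=0.1, momentum=0.9)
updates = sgd.get_updates([W], loss)  # list of (shared_variable, new_value) pairs
train_step = theano.function([x, y], loss, updates=updates)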
|
class RMSprop(Optimizer):
def __init__(self, lr=0.001, rho=0.9, epsilon=1e-06, *args, **kwargs):
super(RMSprop, self).__init__(**kwargs)
self.__dict__.update(locals())
self.lr = shared_scalar(lr)
self.rho = shared_scalar(rho)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
self.updates = []
for (p, g, a, c) in zip(params, grads, accumulators, constraints):
new_a = ((self.rho * a) + ((1 - self.rho) * (g ** 2)))
self.updates.append((a, new_a))
new_p = (p - ((self.lr * g) / T.sqrt((new_a + self.epsilon))))
self.updates.append((p, c(new_p)))
return self.updates
def get_config(self):
return {'name': self.__class__.__name__, 'lr': float(self.lr.get_value()), 'rho': float(self.rho.get_value()), 'epsilon': self.epsilon}
|
class Adagrad(Optimizer):
def __init__(self, lr=0.01, epsilon=1e-06, *args, **kwargs):
super(Adagrad, self).__init__(**kwargs)
self.__dict__.update(locals())
self.lr = shared_scalar(lr)
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
self.updates = []
for (p, g, a, c) in zip(params, grads, accumulators, constraints):
new_a = (a + (g ** 2))
self.updates.append((a, new_a))
new_p = (p - ((self.lr * g) / T.sqrt((new_a + self.epsilon))))
self.updates.append((p, c(new_p)))
return self.updates
def get_config(self):
return {'name': self.__class__.__name__, 'lr': float(self.lr.get_value()), 'epsilon': self.epsilon}
|
class Adadelta(Optimizer):
'\n Reference: http://arxiv.org/abs/1212.5701\n '
def __init__(self, lr=1.0, rho=0.95, epsilon=1e-06, *args, **kwargs):
super(Adadelta, self).__init__(**kwargs)
self.__dict__.update(locals())
self.lr = shared_scalar(lr)
def get_updates(self, params, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
delta_accumulators = [shared_zeros(p.get_value().shape) for p in params]
self.updates = []
for (p, g, a, d_a) in zip(params, grads, accumulators, delta_accumulators):
new_a = ((self.rho * a) + ((1 - self.rho) * (g ** 2)))
self.updates.append((a, new_a))
update = ((g * T.sqrt((d_a + self.epsilon))) / T.sqrt((new_a + self.epsilon)))
new_p = (p - (self.lr * update))
self.updates.append((p, new_p))
new_d_a = ((self.rho * d_a) + ((1 - self.rho) * (update ** 2)))
self.updates.append((d_a, new_d_a))
return (self.updates, grads)
def get_config(self):
return {'name': self.__class__.__name__, 'lr': float(self.lr.get_value()), 'rho': self.rho, 'epsilon': self.epsilon}
|
class Adadelta_GaussianNoise(Optimizer):
'\n Reference: http://arxiv.org/abs/1212.5701\n '
def __init__(self, lr=1.0, rho=0.95, epsilon=1e-06, *args, **kwargs):
super(Adadelta_GaussianNoise, self).__init__(**kwargs)
self.__dict__.update(locals())
self.lr = shared_scalar(lr)
self.rng = MRG_RandomStreams(use_cuda=config.get('run.use_cuda'))
def get_updates(self, params, loss):
grads = self.get_gradients(loss, params)
accumulators = [shared_zeros(p.get_value().shape) for p in params]
delta_accumulators = [shared_zeros(p.get_value().shape) for p in params]
self.updates = []
n_step = theano.shared(1.0)
self.updates.append((n_step, (n_step + 1)))
for (p, g, a, d_a) in zip(params, grads, accumulators, delta_accumulators):
g_noise = self.rng.normal(p.shape, 0, T.sqrt((n_step ** (- 0.55))), dtype='float32')
g_deviated = (g + g_noise)
new_a = ((self.rho * a) + ((1 - self.rho) * (g_deviated ** 2)))
self.updates.append((a, new_a))
update = ((g_deviated * T.sqrt((d_a + self.epsilon))) / T.sqrt((new_a + self.epsilon)))
new_p = (p - (self.lr * update))
self.updates.append((p, new_p))
new_d_a = ((self.rho * d_a) + ((1 - self.rho) * (update ** 2)))
self.updates.append((d_a, new_d_a))
return self.updates
def get_config(self):
return {'name': self.__class__.__name__, 'lr': float(self.lr.get_value()), 'rho': self.rho, 'epsilon': self.epsilon}
|
class Adam(Optimizer):
'\n Reference: http://arxiv.org/abs/1412.6980v8\n\n Default parameters follow those provided in the original paper.\n '
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, *args, **kwargs):
super(Adam, self).__init__(**kwargs)
self.__dict__.update(locals())
self.iterations = shared_scalar(0)
self.lr = shared_scalar(lr)
def get_updates(self, params, loss, **kwargs):
grads = self.get_gradients(loss, params, **kwargs)
self.updates = [(self.iterations, (self.iterations + 1.0))]
t = (self.iterations + 1)
lr_t = ((self.lr * T.sqrt((1 - (self.beta_2 ** t)))) / (1 - (self.beta_1 ** t)))
gradients = []
for (p, g) in zip(params, grads):
m = theano.shared((p.get_value() * 0.0))
v = theano.shared((p.get_value() * 0.0))
g_deviated = g
gradients.append(g)
m_t = ((self.beta_1 * m) + ((1 - self.beta_1) * g_deviated))
v_t = ((self.beta_2 * v) + ((1 - self.beta_2) * (g_deviated ** 2)))
p_t = (p - ((lr_t * m_t) / (T.sqrt(v_t) + self.epsilon)))
self.updates.append((m, m_t))
self.updates.append((v, v_t))
self.updates.append((p, p_t))
return (self.updates, gradients)
def get_config(self):
return {'name': self.__class__.__name__, 'lr': float(self.lr.get_value()), 'beta_1': self.beta_1, 'beta_2': self.beta_2, 'epsilon': self.epsilon}
|
def get(identifier, kwargs=None):
return get_from_module(identifier, globals(), 'optimizer', instantiate=True, kwargs=kwargs)
|
class MetaConfig(type):
def __getitem__(self, key):
return config._config[key]
def __setitem__(self, key, value):
config._config[key] = value
|
class config(object):
_config = {}
__metaclass__ = MetaConfig
@staticmethod
def set(key, val):
config._config[key] = val
@staticmethod
def init_config(file='config.py'):
if (len(config._config) > 0):
return
logging.info('use configuration: %s', file)
data = {}
execfile(file, data)
config._config = data['config']
|
class HDF5Matrix():
refs = defaultdict(int)
def __init__(self, datapath, dataset, start, end, normalizer=None):
if (datapath not in list(self.refs.keys())):
f = h5py.File(datapath)
self.refs[datapath] = f
else:
f = self.refs[datapath]
self.start = start
self.end = end
self.data = f[dataset]
self.normalizer = normalizer
def __len__(self):
return (self.end - self.start)
def __getitem__(self, key):
if isinstance(key, slice):
if ((key.stop + self.start) <= self.end):
idx = slice((key.start + self.start), (key.stop + self.start))
else:
raise IndexError
elif isinstance(key, int):
if ((key + self.start) < self.end):
idx = (key + self.start)
else:
raise IndexError
elif isinstance(key, np.ndarray):
if ((np.max(key) + self.start) < self.end):
idx = (self.start + key).tolist()
else:
raise IndexError
elif isinstance(key, list):
if ((max(key) + self.start) < self.end):
idx = [(x + self.start) for x in key]
else:
raise IndexError
if (self.normalizer is not None):
return self.normalizer(self.data[idx])
else:
return self.data[idx]
@property
def shape(self):
return tuple([(self.end - self.start), self.data.shape[1]])
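# Usage sketch for HDF5Matrix; 'features.h5' and 'X_train' are hypothetical
# file/dataset names, and h5py is assumed to be importable.
X = HDF5Matrix('features.h5', 'X_train', start=0, end=1000)
print(len(X), X.shape)  # 1000 rows, column count taken from the HDF5 dataset
batch = X[0:32]         # slice indices are shifted by `start` internally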
|
def save_array(array, name):
import tables
f = tables.open_file(name, 'w')
atom = tables.Atom.from_dtype(array.dtype)
    ds = f.create_carray(f.root, 'data', atom, array.shape)
ds[:] = array
f.close()
|
def load_array(name):
import tables
f = tables.open_file(name)
array = f.root.data
a = np.empty(shape=array.shape, dtype=array.dtype)
a[:] = array[:]
f.close()
return a
|
def serialize_to_file(obj, path, protocol=cPickle.HIGHEST_PROTOCOL):
f = open(path, 'wb')
cPickle.dump(obj, f, protocol=protocol)
f.close()
|
def deserialize_from_file(path):
f = open(path, 'rb')
obj = cPickle.load(f)
f.close()
return obj
|
def to_categorical(y, nb_classes=None):
'Convert class vector (integers from 0 to nb_classes)\n to binary class matrix, for use with categorical_crossentropy\n '
y = np.asarray(y, dtype='int32')
if (not nb_classes):
nb_classes = (np.max(y) + 1)
Y = np.zeros((len(y), nb_classes))
for i in range(len(y)):
Y[(i, y[i])] = 1.0
return Y
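# Example (numpy assumed imported as np, as the helper itself requires):
print(to_categorical([0, 2, 3], nb_classes=4))
# -> [[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]] as a float matrix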
|
def normalize(a, axis=(- 1), order=2):
l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
l2[(l2 == 0)] = 1
return (a / np.expand_dims(l2, axis))
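# Example: L2-normalize rows; all-zero rows are left unchanged by the l2 == 0 guard.
print(normalize(np.array([[3.0, 4.0], [0.0, 0.0]])))
# -> [[0.6, 0.8], [0.0, 0.0]]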
|
def binary_logloss(p, y):
epsilon = 1e-15
p = sp.maximum(epsilon, p)
p = sp.minimum((1 - epsilon), p)
res = sum(((y * sp.log(p)) + (sp.subtract(1, y) * sp.log(sp.subtract(1, p)))))
res *= ((- 1.0) / len(y))
return res
|
def multiclass_logloss(P, Y):
score = 0.0
npreds = [P[i][(Y[i] - 1)] for i in range(len(Y))]
score = ((- (1.0 / len(Y))) * np.sum(np.log(npreds)))
return score
|
def accuracy(p, y):
return np.mean([(a == b) for (a, b) in zip(p, y)])
|
def probas_to_classes(y_pred):
if ((len(y_pred.shape) > 1) and (y_pred.shape[1] > 1)):
return categorical_probas_to_classes(y_pred)
return np.array([(1 if (p > 0.5) else 0) for p in y_pred])
|
def categorical_probas_to_classes(p):
return np.argmax(p, axis=1)
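# Example: multi-class probabilities use argmax, 1-D probabilities use a 0.5 threshold.
print(probas_to_classes(np.array([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]])))  # [1 0]
print(probas_to_classes(np.array([0.9, 0.2])))                          # [1 0]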
|
def get_test_data(nb_train=1000, nb_test=500, input_shape=(10,), output_shape=(2,), classification=True, nb_class=2):
    '\n classification=True overrides output_shape\n (i.e. output_shape is set to (1,)) and the output\n consists of integers in [0, nb_class-1].\n\n Otherwise: float output with shape output_shape.\n '
nb_sample = (nb_train + nb_test)
if classification:
y = np.random.randint(0, nb_class, size=(nb_sample, 1))
X = np.zeros(((nb_sample,) + input_shape))
for i in range(nb_sample):
X[i] = np.random.normal(loc=y[i], scale=1.0, size=input_shape)
else:
y_loc = np.random.random((nb_sample,))
X = np.zeros(((nb_sample,) + input_shape))
y = np.zeros(((nb_sample,) + output_shape))
for i in range(nb_sample):
X[i] = np.random.normal(loc=y_loc[i], scale=1.0, size=input_shape)
y[i] = np.random.normal(loc=y_loc[i], scale=1.0, size=output_shape)
return ((X[:nb_train], y[:nb_train]), (X[nb_train:], y[nb_train:]))
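# Quick shape check in classification mode:
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=100, nb_test=20)
print(X_train.shape, y_train.shape)  # (100, 10) (100, 1)
print(X_test.shape, y_test.shape)    # (20, 10) (20, 1)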
|
def typename(x):
return type(x).__name__
|
def escape(text):
text = text.replace('"', '`').replace("'", '`').replace(' ', '-SP-').replace('\t', '-TAB-').replace('\n', '-NL-').replace('(', '-LRB-').replace(')', '-RRB-').replace('|', '-BAR-')
return (repr(text)[1:(- 1)] if text else '-NONE-')
|
def makestr(node):
if isinstance(node, ast.AST):
n = 0
nodename = typename(node)
s = ('(' + nodename)
for (chname, chval) in ast.iter_fields(node):
chstr = makestr(chval)
if chstr:
s += ((((' (' + chname) + ' ') + chstr) + ')')
n += 1
if (not n):
s += ((' -' + nodename) + '-')
s += ')'
return s
elif isinstance(node, list):
n = 0
s = '(list'
for ch in node:
chstr = makestr(ch)
if chstr:
s += (' ' + chstr)
n += 1
s += ')'
return (s if n else '')
elif isinstance(node, str):
return (('(str ' + escape(node)) + ')')
elif isinstance(node, bytes):
return (('(bytes ' + escape(str(node))) + ')')
else:
return (((('(' + typename(node)) + ' ') + str(node)) + ')')
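# Example: makestr renders a parsed statement as an s-expression; the exact
# fields depend on the Python version's ast module.
import ast
print(makestr(ast.parse('x = 1').body[0]))
# e.g. (Assign (targets (list (Name (id (str x)) (ctx (Store -Store-))))) (value ...) ...)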
|
def main():
p_elif = re.compile('^elif\\s?')
p_else = re.compile('^else\\s?')
p_try = re.compile('^try\\s?')
p_except = re.compile('^except\\s?')
p_finally = re.compile('^finally\\s?')
p_decorator = re.compile('^@.*')
for l in ['val = Header ( val , encoding ) . encode ( )']:
l = l.strip()
if (not l):
print()
sys.stdout.flush()
continue
if p_elif.match(l):
l = ('if True: pass\n' + l)
if p_else.match(l):
l = ('if True: pass\n' + l)
if p_try.match(l):
l = (l + 'pass\nexcept: pass')
elif p_except.match(l):
l = ('try: pass\n' + l)
elif p_finally.match(l):
l = ('try: pass\n' + l)
if p_decorator.match(l):
l = (l + '\ndef dummy(): pass')
if (l[(- 1)] == ':'):
l = (l + 'pass')
parse = ast.parse(l)
parse = parse.body[0]
dump = makestr(parse)
print(dump)
sys.stdout.flush()
|
def is_numeric(s):
if (s[0] in ('-', '+')):
return s[1:].isdigit()
return s.isdigit()
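# Example: only optionally signed integer strings count as numeric.
print(is_numeric('-42'), is_numeric('+7'), is_numeric('3.5'))  # True True False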
|
def process_story(text):
    'Process a story text into an (article, summary) tuple.\n '
elements = text.split('@highlight')
elements = [_.strip() for _ in elements]
story_text = elements[0]
highlights = elements[1:]
highlights_joined = '; '.join(highlights)
highlights_joined = re.sub('\\s+', ' ', highlights_joined)
highlights_joined = highlights_joined.strip()
story_text = re.sub('\\s+', ' ', story_text)
story_text = story_text.strip()
return (story_text, highlights_joined)
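# Example with a made-up CNN/DailyMail-style story (re is assumed imported,
# as the function itself uses it):
sample = 'Some article text .\n\n@highlight\n\nFirst point\n\n@highlight\n\nSecond point'
print(process_story(sample))
# -> ('Some article text .', 'First point; Second point')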
|
def main(*args, **kwargs):
'Program entry point'
story_text = '\n'.join(list(fileinput.input()))
(story, highlights) = process_story(story_text)
if (story and highlights):
print('{}\t{}'.format(story, highlights))
|
def main(_argv):
'Program entry point.\n '
if FLAGS.config_path:
with gfile.GFile(FLAGS.config_path) as config_file:
config_flags = yaml.load(config_file)
for (flag_key, flag_value) in config_flags.items():
setattr(FLAGS, flag_key, flag_value)
if isinstance(FLAGS.tasks, string_types):
FLAGS.tasks = _maybe_load_yaml(FLAGS.tasks)
if isinstance(FLAGS.input_pipeline, string_types):
FLAGS.input_pipeline = _maybe_load_yaml(FLAGS.input_pipeline)
input_pipeline_infer = input_pipeline.make_input_pipeline_from_def(FLAGS.input_pipeline, mode=tf.contrib.learn.ModeKeys.INFER, shuffle=False, num_epochs=1)
train_options = training_utils.TrainOptions.load(FLAGS.model_dir)
model_cls = (locate(train_options.model_class) or getattr(models, train_options.model_class))
model_params = train_options.model_params
model_params = _deep_merge_dict(model_params, _maybe_load_yaml(FLAGS.model_params))
model = model_cls(params=model_params, mode=tf.contrib.learn.ModeKeys.INFER)
hooks = []
for tdict in FLAGS.tasks:
if (not ('params' in tdict)):
tdict['params'] = {}
task_cls = (locate(tdict['class']) or getattr(tasks, tdict['class']))
task = task_cls(tdict['params'])
hooks.append(task)
(predictions, _, _) = create_inference_graph(model=model, input_pipeline=input_pipeline_infer, batch_size=FLAGS.batch_size)
saver = tf.train.Saver()
checkpoint_path = FLAGS.checkpoint_path
if (not checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)
def session_init_op(_scaffold, sess):
saver.restore(sess, checkpoint_path)
tf.logging.info('Restored model from %s', checkpoint_path)
scaffold = tf.train.Scaffold(init_fn=session_init_op)
session_creator = tf.train.ChiefSessionCreator(scaffold=scaffold)
with tf.train.MonitoredSession(session_creator=session_creator, hooks=hooks) as sess:
while (not sess.should_stop()):
sess.run([])
|
def _add_graph_level(graph, level, parent_ids, names, scores):
    'Adds a level to the passed graph'
for (i, parent_id) in enumerate(parent_ids):
new_node = (level, i)
parent_node = ((level - 1), parent_id)
graph.add_node(new_node)
graph.node[new_node]['name'] = names[i]
graph.node[new_node]['score'] = str(scores[i])
graph.node[new_node]['size'] = 100
graph.add_edge(parent_node, new_node)
|
def create_graph(predicted_ids, parent_ids, scores, vocab=None):
def get_node_name(pred):
return (vocab[pred] if vocab else str(pred))
seq_length = predicted_ids.shape[0]
graph = nx.DiGraph()
for level in range(seq_length):
names = [get_node_name(pred) for pred in predicted_ids[level]]
_add_graph_level(graph, (level + 1), parent_ids[level], names, scores[level])
graph.node[(0, 0)]['name'] = 'START'
return graph
|
def main():
beam_data = np.load(ARGS.data)
vocab = None
if ARGS.vocab:
with open(ARGS.vocab) as file:
vocab = file.readlines()
vocab = [_.strip() for _ in vocab]
vocab += ['UNK', 'SEQUENCE_START', 'SEQUENCE_END']
if (not os.path.exists(ARGS.output_dir)):
os.makedirs(ARGS.output_dir)
shutil.copy2('./bin/tools/beam_search_viz/tree.css', ARGS.output_dir)
shutil.copy2('./bin/tools/beam_search_viz/tree.js', ARGS.output_dir)
for idx in range(len(beam_data['predicted_ids'])):
predicted_ids = beam_data['predicted_ids'][idx]
parent_ids = beam_data['beam_parent_ids'][idx]
scores = beam_data['scores'][idx]
graph = create_graph(predicted_ids=predicted_ids, parent_ids=parent_ids, scores=scores, vocab=vocab)
json_str = json.dumps(json_graph.tree_data(graph, (0, 0)), ensure_ascii=False)
html_str = HTML_TEMPLATE.substitute(DATA=json_str)
output_path = os.path.join(ARGS.output_dir, '{:06d}.html'.format(idx))
with open(output_path, 'w') as file:
file.write(html_str)
print(output_path)
|
def make_copy(num_examples, min_len, max_len):
'\n Generates a dataset where the target is equal to the source.\n Sequence lengths are chosen randomly from [min_len, max_len].\n\n Args:\n num_examples: Number of examples to generate\n min_len: Minimum sequence length\n max_len: Maximum sequence length\n\n Returns:\n An iterator of (source, target) string tuples.\n '
for _ in range(num_examples):
turn_length = np.random.choice(np.arange(min_len, (max_len + 1)))
source_tokens = np.random.choice(list(VOCABULARY), size=turn_length, replace=True)
target_tokens = source_tokens
(yield (' '.join(source_tokens), ' '.join(target_tokens)))
|
def make_reverse(num_examples, min_len, max_len):
'\n Generates a dataset where the target is equal to the source reversed.\n Sequence lengths are chosen randomly from [min_len, max_len].\n\n Args:\n num_examples: Number of examples to generate\n min_len: Minimum sequence length\n max_len: Maximum sequence length\n\n Returns:\n An iterator of (source, target) string tuples.\n '
for _ in range(num_examples):
turn_length = np.random.choice(np.arange(min_len, (max_len + 1)))
source_tokens = np.random.choice(list(VOCABULARY), size=turn_length, replace=True)
target_tokens = source_tokens[::(- 1)]
(yield (' '.join(source_tokens), ' '.join(target_tokens)))
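# Usage sketch; VOCABULARY here is a hypothetical stand-in for the module-level
# token collection the generators read, and numpy is assumed imported as np.
VOCABULARY = [str(i) for i in range(10)]
for source, target in make_reverse(num_examples=2, min_len=3, max_len=5):
    print(source, '->', target)  # e.g. "4 0 7 -> 7 0 4"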
|
def write_parallel_text(sources, targets, output_prefix):
'\n Writes two files where each line corresponds to one example\n - [output_prefix].sources.txt\n - [output_prefix].targets.txt\n\n Args:\n sources: Iterator of source strings\n targets: Iterator of target strings\n output_prefix: Prefix for the output file\n '
source_filename = os.path.abspath(os.path.join(output_prefix, 'sources.txt'))
target_filename = os.path.abspath(os.path.join(output_prefix, 'targets.txt'))
with io.open(source_filename, 'w', encoding='utf8') as source_file:
for record in sources:
source_file.write((record + '\n'))
print('Wrote {}'.format(source_filename))
with io.open(target_filename, 'w', encoding='utf8') as target_file:
for record in targets:
target_file.write((record + '\n'))
print('Wrote {}'.format(target_filename))
|
def main():
'Main function'
if (ARGS.type == 'copy'):
generate_fn = make_copy
elif (ARGS.type == 'reverse'):
generate_fn = make_reverse
examples = list(generate_fn(ARGS.num_examples, ARGS.min_len, ARGS.max_len))
try:
os.makedirs(ARGS.output_dir)
except OSError:
if (not os.path.isdir(ARGS.output_dir)):
raise
(train_sources, train_targets) = zip(*examples)
write_parallel_text(train_sources, train_targets, ARGS.output_dir)
|
def _register_function_ops(func_list):
    'Registers custom ops in the default graph. This is needed\n because our checkpoint is saved with ops that are not part of TensorFlow.'
op_dict = op_def_registry.get_registered_ops()
for func in func_list:
func._create_definition_if_needed()
op_def = func._definition.signature
op_dict[op_def.name] = op_def
RegisterShape(op_def.name)(common_shapes.unknown_shape)
|
def load_metadata(model_dir):
'Loads RunMetadata, Graph and OpLog from files\n '
run_meta_path = os.path.join(model_dir, 'metadata/run_meta')
run_meta = tf.RunMetadata()
if gfile.Exists(run_meta_path):
with gfile.GFile(run_meta_path, 'rb') as file:
run_meta.MergeFromString(file.read())
print('Loaded RunMetadata from {}'.format(run_meta_path))
else:
        print('RunMetadata does not exist at {}. Skipping.'.format(run_meta_path))
graph_def_path = os.path.join(model_dir, 'graph.pbtxt')
graph = tf.Graph()
if gfile.Exists(graph_def_path):
with graph.as_default():
_register_function_ops(CUSTOM_OP_FUNCTIONS)
graph_def = tf.GraphDef()
with gfile.GFile(graph_def_path, 'rb') as file:
text_format.Parse(file.read(), graph_def)
tf.import_graph_def(graph_def, name='')
print('Loaded Graph from {}'.format(graph_def_path))
else:
        print('Graph does not exist at {}. Skipping.'.format(graph_def_path))
op_log_path = os.path.join(model_dir, 'metadata/tfprof_log')
op_log = tfprof_log_pb2.OpLog()
if gfile.Exists(op_log_path):
with gfile.GFile(op_log_path, 'rb') as file:
op_log.MergeFromString(file.read())
print('Loaded OpLog from {}'.format(op_log_path))
else:
        print('OpLog does not exist at {}. Skipping.'.format(op_log_path))
return (run_meta, graph, op_log)
|
def merge_default_with_oplog(graph, op_log=None, run_meta=None):
'Monkeypatch. There currently is a bug in tfprof_logger that\n prevents it from being used with Python 3. So we override the method\n manually until the fix comes in.\n '
tmp_op_log = tfprof_log_pb2.OpLog()
logged_ops = tfprof_logger._get_logged_ops(graph, run_meta)
if (not op_log):
tmp_op_log.log_entries.extend(logged_ops.values())
else:
all_ops = dict()
for entry in op_log.log_entries:
all_ops[entry.name] = entry
for (op_name, entry) in six.iteritems(logged_ops):
if (op_name in all_ops):
all_ops[op_name].types.extend(entry.types)
if ((entry.float_ops > 0) and (all_ops[op_name].float_ops == 0)):
all_ops[op_name].float_ops = entry.float_ops
else:
all_ops[op_name] = entry
tmp_op_log.log_entries.extend(all_ops.values())
return tmp_op_log
|
def param_analysis_options(output_dir):
'Options for model parameter analysis\n '
options = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
options['select'] = ['params', 'bytes']
options['order_by'] = 'params'
options['account_type_regexes'] = ['Variable']
if output_dir:
options['dump_to_file'] = os.path.join(output_dir, 'params.txt')
return ('scope', options)
|
def micro_anaylsis_options(output_dir):
'Options for microsecond analysis\n '
options = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
options['select'] = ['micros', 'device']
options['min_micros'] = 1000
options['account_type_regexes'] = ['.*']
options['order_by'] = 'micros'
if output_dir:
options['dump_to_file'] = os.path.join(output_dir, 'micro.txt')
return ('graph', options)
|
def flops_analysis_options(output_dir):
'Options for FLOPS analysis\n '
options = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
options['select'] = ['float_ops', 'micros', 'device']
options['min_float_ops'] = 1
options['order_by'] = 'float_ops'
options['account_type_regexes'] = ['.*']
if output_dir:
options['dump_to_file'] = os.path.join(output_dir, 'flops.txt')
return ('scope', options)
|
def device_analysis_options(output_dir):
'Options for device placement analysis\n '
options = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
options['select'] = ['device', 'float_ops', 'micros']
options['order_by'] = 'name'
options['account_type_regexes'] = ['.*']
if output_dir:
options['dump_to_file'] = os.path.join(output_dir, 'device.txt')
return ('scope', options)
|
def main(_argv):
    'Main function. Runs all analyses.'
tfprof_logger._merge_default_with_oplog = merge_default_with_oplog
FLAGS.model_dir = os.path.abspath(os.path.expanduser(FLAGS.model_dir))
output_dir = os.path.join(FLAGS.model_dir, 'profile')
gfile.MakeDirs(output_dir)
(run_meta, graph, op_log) = load_metadata(FLAGS.model_dir)
param_arguments = [param_analysis_options(output_dir), micro_anaylsis_options(output_dir), flops_analysis_options(output_dir), device_analysis_options(output_dir)]
for (tfprof_cmd, params) in param_arguments:
model_analyzer.print_model_analysis(graph=graph, run_meta=run_meta, op_log=op_log, tfprof_cmd=tfprof_cmd, tfprof_options=params)
if (params['dump_to_file'] != ''):
print('Wrote {}'.format(params['dump_to_file']))
|
def create_experiment(output_dir):
'\n Creates a new Experiment instance.\n\n Args:\n output_dir: Output directory for model checkpoints and summaries.\n '
config = run_config.RunConfig(tf_random_seed=FLAGS.tf_random_seed, save_checkpoints_secs=FLAGS.save_checkpoints_secs, save_checkpoints_steps=FLAGS.save_checkpoints_steps, keep_checkpoint_max=FLAGS.keep_checkpoint_max, keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours, gpu_memory_fraction=FLAGS.gpu_memory_fraction)
config.tf_config.gpu_options.allow_growth = FLAGS.gpu_allow_growth
config.tf_config.log_device_placement = FLAGS.log_device_placement
train_options = training_utils.TrainOptions(model_class=FLAGS.model, model_params=FLAGS.model_params)
if config.is_chief:
gfile.MakeDirs(output_dir)
train_options.dump(output_dir)
bucket_boundaries = None
if FLAGS.buckets:
bucket_boundaries = list(map(int, FLAGS.buckets.split(',')))
train_input_pipeline = input_pipeline.make_input_pipeline_from_def(def_dict=FLAGS.input_pipeline_train, mode=tf.contrib.learn.ModeKeys.TRAIN)
train_input_fn = training_utils.create_input_fn(pipeline=train_input_pipeline, batch_size=FLAGS.batch_size, bucket_boundaries=bucket_boundaries, scope='train_input_fn')
dev_input_pipeline = input_pipeline.make_input_pipeline_from_def(def_dict=FLAGS.input_pipeline_dev, mode=tf.contrib.learn.ModeKeys.EVAL, shuffle=False, num_epochs=1)
eval_input_fn = training_utils.create_input_fn(pipeline=dev_input_pipeline, batch_size=FLAGS.batch_size, allow_smaller_final_batch=True, scope='dev_input_fn')
def model_fn(features, labels, params, mode):
'Builds the model graph'
model = _create_from_dict({'class': train_options.model_class, 'params': train_options.model_params}, models, mode=mode)
return model(features, labels, params)
estimator = tf.contrib.learn.Estimator(model_fn=model_fn, model_dir=output_dir, config=config, params=FLAGS.model_params)
train_hooks = []
for dict_ in FLAGS.hooks:
hook = _create_from_dict(dict_, hooks, model_dir=estimator.model_dir, run_config=config)
train_hooks.append(hook)
eval_metrics = {}
for dict_ in FLAGS.metrics:
metric = _create_from_dict(dict_, metric_specs)
eval_metrics[metric.name] = metric
experiment = PatchedExperiment(estimator=estimator, train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, min_eval_frequency=FLAGS.eval_every_n_steps, train_steps=FLAGS.train_steps, eval_steps=None, eval_metrics=eval_metrics, train_monitors=train_hooks)
return experiment
|
def main(_argv):
'The entrypoint for the script'
FLAGS.hooks = _maybe_load_yaml(FLAGS.hooks)
FLAGS.metrics = _maybe_load_yaml(FLAGS.metrics)
FLAGS.model_params = _maybe_load_yaml(FLAGS.model_params)
FLAGS.input_pipeline_train = _maybe_load_yaml(FLAGS.input_pipeline_train)
FLAGS.input_pipeline_dev = _maybe_load_yaml(FLAGS.input_pipeline_dev)
final_config = {}
if FLAGS.config_paths:
for config_path in FLAGS.config_paths.split(','):
config_path = config_path.strip()
if (not config_path):
continue
config_path = os.path.abspath(config_path)
tf.logging.info('Loading config from %s', config_path)
with gfile.GFile(config_path.strip()) as config_file:
config_flags = yaml.load(config_file)
final_config = _deep_merge_dict(final_config, config_flags)
tf.logging.info('Final Config:\n%s', yaml.dump(final_config))
for (flag_key, flag_value) in final_config.items():
if (hasattr(FLAGS, flag_key) and isinstance(getattr(FLAGS, flag_key), dict)):
merged_value = _deep_merge_dict(flag_value, getattr(FLAGS, flag_key))
setattr(FLAGS, flag_key, merged_value)
elif hasattr(FLAGS, flag_key):
setattr(FLAGS, flag_key, flag_value)
else:
tf.logging.warning('Ignoring config flag: %s', flag_key)
if ((FLAGS.save_checkpoints_secs is None) and (FLAGS.save_checkpoints_steps is None)):
FLAGS.save_checkpoints_secs = 600
tf.logging.info('Setting save_checkpoints_secs to %d', FLAGS.save_checkpoints_secs)
if (not FLAGS.output_dir):
FLAGS.output_dir = tempfile.mkdtemp()
if (not FLAGS.input_pipeline_train):
raise ValueError('You must specify input_pipeline_train')
if (not FLAGS.input_pipeline_dev):
raise ValueError('You must specify input_pipeline_dev')
learn_runner.run(experiment_fn=create_experiment, output_dir=FLAGS.output_dir, schedule=FLAGS.schedule)
|
class abstractstaticmethod(staticmethod):
'Decorates a method as abstract and static'
__slots__ = ()
def __init__(self, function):
super(abstractstaticmethod, self).__init__(function)
function.__isabstractmethod__ = True
__isabstractmethod__ = True
|
def _create_from_dict(dict_, default_module, *args, **kwargs):
'Creates a configurable class from a dictionary. The dictionary must have\n "class" and "params" properties. The class can be either fully qualified, or\n it is looked up in the modules passed via `default_module`.\n '
class_ = (locate(dict_['class']) or getattr(default_module, dict_['class']))
params = {}
if ('params' in dict_):
params = dict_['params']
instance = class_(params, *args, **kwargs)
return instance
|
def _maybe_load_yaml(item):
'Parses `item` only if it is a string. If `item` is a dictionary\n it is returned as-is.\n '
if isinstance(item, six.string_types):
return yaml.load(item)
elif isinstance(item, dict):
return item
else:
        raise ValueError('Got {}, expected YAML string or dict'.format(type(item)))
|
def _deep_merge_dict(dict_x, dict_y, path=None):
'Recursively merges dict_y into dict_x.\n '
if (path is None):
path = []
for key in dict_y:
if (key in dict_x):
if (isinstance(dict_x[key], dict) and isinstance(dict_y[key], dict)):
_deep_merge_dict(dict_x[key], dict_y[key], (path + [str(key)]))
elif (dict_x[key] == dict_y[key]):
pass
else:
dict_x[key] = dict_y[key]
else:
dict_x[key] = dict_y[key]
return dict_x
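# Example (note that dict_x is modified in place):
base = {'model': {'lr': 0.001, 'layers': 2}, 'name': 'seq2seq'}
override = {'model': {'lr': 0.01}}
print(_deep_merge_dict(base, override))
# -> {'model': {'lr': 0.01, 'layers': 2}, 'name': 'seq2seq'}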
|
def _parse_params(params, default_params):
'Parses parameter values to the types defined by the default parameters.\n Default parameters are used for missing values.\n '
if (params is None):
params = {}
result = copy.deepcopy(default_params)
for (key, value) in params.items():
if (key not in default_params):
print('unknown key', key, value)
continue
if isinstance(value, dict):
default_dict = default_params[key]
if (not isinstance(default_dict, dict)):
raise ValueError('%s should not be a dictionary', key)
if default_dict:
value = _parse_params(value, default_dict)
else:
pass
if (value is None):
continue
if (default_params[key] is None):
result[key] = value
else:
result[key] = type(default_params[key])(value)
return result
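# Example: values are cast to the type of the corresponding default, and unknown
# keys are reported and dropped (copy is assumed imported, as the function uses it).
defaults = {'units': 128, 'dropout': 0.5, 'cell': {'type': 'lstm'}}
print(_parse_params({'units': '256', 'unknown': 1}, defaults))
# prints "unknown key unknown 1", then -> {'units': 256, 'dropout': 0.5, 'cell': {'type': 'lstm'}}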
|
@six.add_metaclass(abc.ABCMeta)
class Configurable(object):
'Interface for all classes that are configurable\n via a parameters dictionary.\n\n Args:\n params: A dictionary of parameters.\n mode: A value in tf.contrib.learn.ModeKeys\n '
def __init__(self, params, mode):
self._params = _parse_params(params, self.default_params())
self._mode = mode
self._print_params()
def _print_params(self):
'Logs parameter values'
classname = self.__class__.__name__
tf.logging.info('Creating %s in mode=%s', classname, self._mode)
tf.logging.info('\n%s', yaml.dump({classname: self._params}))
@property
def mode(self):
'Returns a value in tf.contrib.learn.ModeKeys.\n '
return self._mode
@property
def params(self):
'Returns a dictionary of parsed parameters.\n '
return self._params
@abstractstaticmethod
def default_params():
'Returns a dictionary of default parameters. The default parameters\n are used to define the expected type of passed parameters. Missing\n parameter values are replaced with the defaults returned by this method.\n '
raise NotImplementedError
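# Minimal sketch of a Configurable subclass; ToyEncoder is a made-up name, and
# tf/yaml are assumed to be imported as in the class above. The string 'train'
# stands in for tf.contrib.learn.ModeKeys.TRAIN.
class ToyEncoder(Configurable):
    @staticmethod
    def default_params():
        return {'units': 128, 'dropout': 0.0}

enc = ToyEncoder({'units': '256'}, mode='train')
print(enc.params)  # {'units': 256, 'dropout': 0.0}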
|
class Experiment(tf.contrib.learn.Experiment):
'A patched tf.learn Experiment class to handle GPU memory\n sharing issues.'
def __init__(self, train_steps_per_iteration=None, *args, **kwargs):
super(Experiment, self).__init__(*args, **kwargs)
self._train_steps_per_iteration = train_steps_per_iteration
def _has_training_stopped(self, eval_result):
'Determines whether the training has stopped.'
if (not eval_result):
return False
global_step = eval_result.get(tf.GraphKeys.GLOBAL_STEP)
return (global_step and self._train_steps and (global_step >= self._train_steps))
def continuous_train_and_eval(self, continuous_eval_predicate_fn=None):
"Interleaves training and evaluation.\n\n The frequency of evaluation is controlled by the `train_steps_per_iteration`\n (via constructor). The model will be first trained for\n `train_steps_per_iteration`, and then be evaluated in turns.\n\n This differs from `train_and_evaluate` as follows:\n 1. The procedure will have train and evaluation in turns. The model\n will be trained for a number of steps (usuallly smaller than `train_steps`\n if provided) and then be evaluated. `train_and_evaluate` will train the\n model for `train_steps` (no small training iteraions).\n\n 2. Due to the different approach this schedule takes, it leads to two\n differences in resource control. First, the resources (e.g., memory) used\n by training will be released before evaluation (`train_and_evaluate` takes\n double resources). Second, more checkpoints will be saved as a checkpoint\n is generated at the end of each small trainning iteration.\n\n Args:\n continuous_eval_predicate_fn: A predicate function determining whether to\n continue after each iteration. `predicate_fn` takes the evaluation\n results as its arguments. At the beginning of evaluation, the passed\n eval results will be None so it's expected that the predicate function\n handles that gracefully. When `predicate_fn` is not specified, this will\n run in an infinite loop or exit when global_step reaches `train_steps`.\n\n Returns:\n A tuple of the result of the `evaluate` call to the `Estimator` and the\n export results using the specified `ExportStrategy`.\n\n Raises:\n ValueError: if `continuous_eval_predicate_fn` is neither None nor\n callable.\n "
if ((continuous_eval_predicate_fn is not None) and (not callable(continuous_eval_predicate_fn))):
raise ValueError('`continuous_eval_predicate_fn` must be a callable, or None.')
eval_result = None
train_steps_per_iteration = 1000
if (self._train_steps_per_iteration is not None):
train_steps_per_iteration = self._train_steps_per_iteration
elif (self._train_steps is not None):
train_steps_per_iteration = min(self._min_eval_frequency, self._train_steps)
while ((not continuous_eval_predicate_fn) or continuous_eval_predicate_fn(eval_result)):
if self._has_training_stopped(eval_result):
tf.logging.info('Stop training model as max steps reached')
break
tf.logging.info('Training model for %s steps', train_steps_per_iteration)
self._estimator.fit(input_fn=self._train_input_fn, steps=train_steps_per_iteration, monitors=self._train_monitors)
tf.logging.info('Evaluating model now.')
tf.get_default_graph().finalize()
eval_result = self._estimator.evaluate(input_fn=self._eval_input_fn, steps=self._eval_steps, metrics=self._eval_metrics, name='one_pass', hooks=self._eval_hooks)
return (eval_result, self._maybe_export(eval_result))
|
class ExtendedMultiRNNCell(MultiRNNCell):
'Extends the Tensorflow MultiRNNCell with residual connections'
def __init__(self, cells, residual_connections=False, residual_combiner='add', residual_dense=False):
'Create a RNN cell composed sequentially of a number of RNNCells.\n\n Args:\n cells: list of RNNCells that will be composed in this order.\n state_is_tuple: If True, accepted and returned states are n-tuples, where\n `n = len(cells)`. If False, the states are all\n concatenated along the column axis. This latter behavior will soon be\n deprecated.\n residual_connections: If true, add residual connections between all cells.\n This requires all cells to have the same output_size. Also, iff the\n input size is not equal to the cell output size, a linear transform\n is added before the first layer.\n residual_combiner: One of "add" or "concat". To create inputs for layer\n t+1 either "add" the inputs from the prev layer or concat them.\n residual_dense: Densely connect each layer to all other layers\n\n Raises:\n ValueError: if cells is empty (not allowed), or at least one of the cells\n returns a state tuple but the flag `state_is_tuple` is `False`.\n '
super(ExtendedMultiRNNCell, self).__init__(cells, state_is_tuple=True)
assert (residual_combiner in ['add', 'concat', 'mean'])
self._residual_connections = residual_connections
self._residual_combiner = residual_combiner
self._residual_dense = residual_dense
def __call__(self, inputs, state, scope=None):
'Run this multi-layer cell on inputs, starting from state.'
if (not self._residual_connections):
return super(ExtendedMultiRNNCell, self).__call__(inputs, state, (scope or 'extended_multi_rnn_cell'))
with tf.variable_scope((scope or 'extended_multi_rnn_cell')):
if ((self._cells[0].output_size != inputs.get_shape().as_list()[1]) and (self._residual_combiner in ['add', 'mean'])):
inputs = tf.contrib.layers.fully_connected(inputs=inputs, num_outputs=self._cells[0].output_size, activation_fn=None, scope='input_transform')
cur_inp = inputs
prev_inputs = [cur_inp]
new_states = []
for (i, cell) in enumerate(self._cells):
with tf.variable_scope(('cell_%d' % i)):
if (not nest.is_sequence(state)):
raise ValueError(('Expected state to be a tuple of length %d, but received: %s' % (len(self.state_size), state)))
cur_state = state[i]
(next_input, new_state) = cell(cur_inp, cur_state)
input_to_combine = prev_inputs[(- 1):]
if self._residual_dense:
input_to_combine = prev_inputs
if (self._residual_combiner == 'add'):
next_input = (next_input + sum(input_to_combine))
if (self._residual_combiner == 'mean'):
combined_mean = tf.reduce_mean(tf.stack(input_to_combine), 0)
next_input = (next_input + combined_mean)
elif (self._residual_combiner == 'concat'):
next_input = tf.concat(([next_input] + input_to_combine), 1)
cur_inp = next_input
prev_inputs.append(cur_inp)
new_states.append(new_state)
new_states = (tuple(new_states) if self._state_is_tuple else array_ops.concat(new_states, 1))
return (cur_inp, new_states)
|
def _transpose_batch_time(x):
'Transpose the batch and time dimensions of a Tensor.\n\n Retains as much of the static shape information as possible.\n\n Args:\n x: A tensor of rank 2 or higher.\n\n Returns:\n x transposed along the first two dimensions.\n\n Raises:\n ValueError: if `x` is rank 1 or lower.\n '
x_static_shape = x.get_shape()
if ((x_static_shape.ndims is not None) and (x_static_shape.ndims < 2)):
raise ValueError(('Expected input tensor %s to have rank at least 2, but saw shape: %s' % (x, x_static_shape)))
x_rank = array_ops.rank(x)
x_t = array_ops.transpose(x, array_ops.concat(([1, 0], math_ops.range(2, x_rank)), axis=0))
x_t.set_shape(tensor_shape.TensorShape([x_static_shape[1].value, x_static_shape[0].value]).concatenate(x_static_shape[2:]))
return x_t
|
@six.add_metaclass(abc.ABCMeta)
class Decoder(object):
'An RNN Decoder abstract interface object.'
@property
def batch_size(self):
'The batch size of the inputs returned by `sample`.'
raise NotImplementedError
@property
def output_size(self):
'A (possibly nested tuple of...) integer[s] or `TensorShape` object[s].'
raise NotImplementedError
@property
def output_dtype(self):
'A (possibly nested tuple of...) dtype[s].'
raise NotImplementedError
@abc.abstractmethod
def initialize(self, name=None):
'Called before any decoding iterations.\n\n Args:\n name: Name scope for any created operations.\n\n Returns:\n `(finished, first_inputs, initial_state)`.\n '
raise NotImplementedError
@abc.abstractmethod
def step(self, time, inputs, state, name=None):
'Called per step of decoding (but only once for dynamic decoding).\n\n Args:\n time: Scalar `int32` tensor.\n inputs: Input (possibly nested tuple of) tensor[s] for this time step.\n state: State (possibly nested tuple of) tensor[s] from previous time step.\n name: Name scope for any created operations.\n\n Returns:\n `(outputs, next_state, next_inputs, finished)`.\n '
raise NotImplementedError
|
def _create_zero_outputs(size, dtype, batch_size):
'Create a zero outputs Tensor structure.'
def _t(s):
return (s if isinstance(s, ops.Tensor) else constant_op.constant(tensor_shape.TensorShape(s).as_list(), dtype=dtypes.int32, name='zero_suffix_shape'))
def _create(s, d):
return array_ops.zeros(array_ops.concat(([batch_size], _t(s)), axis=0), dtype=d)
return nest.map_structure(_create, size, dtype)
|
def dynamic_decode(decoder, output_time_major=False, impute_finished=False, maximum_iterations=None, parallel_iterations=32, swap_memory=False, scope=None):
'Perform dynamic decoding with `decoder`.\n\n Args:\n decoder: A `Decoder` instance.\n output_time_major: Python boolean. Default: `False` (batch major). If\n `True`, outputs are returned as time major tensors (this mode is faster).\n Otherwise, outputs are returned as batch major tensors (this adds extra\n time to the computation).\n impute_finished: Python boolean. If `True`, then states for batch\n entries which are marked as finished get copied through and the\n corresponding outputs get zeroed out. This causes some slowdown at\n each time step, but ensures that the final state and outputs have\n the correct values and that backprop ignores time steps that were\n marked as finished.\n maximum_iterations: `int32` scalar, maximum allowed number of decoding\n steps. Default is `None` (decode until the decoder is fully done).\n parallel_iterations: Argument passed to `tf.while_loop`.\n swap_memory: Argument passed to `tf.while_loop`.\n scope: Optional variable scope to use.\n\n Returns:\n `(final_outputs, final_state)`.\n\n Raises:\n TypeError: if `decoder` is not an instance of `Decoder`.\n ValueError: if maximum_iterations is provided but is not a scalar.\n '
if (not isinstance(decoder, Decoder)):
raise TypeError(('Expected decoder to be type Decoder, but saw: %s' % type(decoder)))
with variable_scope.variable_scope((scope or 'decoder')) as varscope:
if (varscope.caching_device is None):
varscope.set_caching_device((lambda op: op.device))
if (maximum_iterations is not None):
maximum_iterations = ops.convert_to_tensor(maximum_iterations, dtype=dtypes.int32, name='maximum_iterations')
if (maximum_iterations.get_shape().ndims != 0):
raise ValueError('maximum_iterations must be a scalar')
(initial_finished, initial_inputs, initial_state) = decoder.initialize()
zero_outputs = _create_zero_outputs(decoder.output_size, decoder.output_dtype, decoder.batch_size)
if (maximum_iterations is not None):
initial_finished = math_ops.logical_or(initial_finished, (0 >= maximum_iterations))
initial_time = constant_op.constant(0, dtype=dtypes.int32)
def _shape(batch_size, from_shape):
if (not isinstance(from_shape, tensor_shape.TensorShape)):
return tensor_shape.TensorShape(None)
else:
batch_size = tensor_util.constant_value(ops.convert_to_tensor(batch_size, name='batch_size'))
return tensor_shape.TensorShape([batch_size]).concatenate(from_shape)
def _create_ta(s, d):
return tensor_array_ops.TensorArray(dtype=d, size=0, dynamic_size=True, element_shape=_shape(decoder.batch_size, s))
initial_outputs_ta = nest.map_structure(_create_ta, decoder.output_size, decoder.output_dtype)
def condition(unused_time, unused_outputs_ta, unused_state, unused_inputs, finished):
return math_ops.logical_not(math_ops.reduce_all(finished))
def body(time, outputs_ta, state, inputs, finished):
'Internal while_loop body.\n\n Args:\n time: scalar int32 tensor.\n outputs_ta: structure of TensorArray.\n state: (structure of) state tensors and TensorArrays.\n inputs: (structure of) input tensors.\n finished: 1-D bool tensor.\n\n Returns:\n `(time + 1, outputs_ta, next_state, next_inputs, next_finished)`.\n '
(next_outputs, decoder_state, next_inputs, decoder_finished) = decoder.step(time, inputs, state)
next_finished = math_ops.logical_or(decoder_finished, finished)
if (maximum_iterations is not None):
next_finished = math_ops.logical_or(next_finished, ((time + 1) >= maximum_iterations))
nest.assert_same_structure(state, decoder_state)
nest.assert_same_structure(outputs_ta, next_outputs)
nest.assert_same_structure(inputs, next_inputs)
if impute_finished:
emit = nest.map_structure((lambda out, zero: array_ops.where(finished, zero, out)), next_outputs, zero_outputs)
else:
emit = next_outputs
def _maybe_copy_state(new, cur):
if isinstance(cur, tensor_array_ops.TensorArray):
pass_through = True
else:
new.set_shape(cur.shape)
pass_through = (new.shape.ndims == 0)
return (new if pass_through else array_ops.where(finished, cur, new))
if impute_finished:
next_state = nest.map_structure(_maybe_copy_state, decoder_state, state)
else:
next_state = decoder_state
outputs_ta = nest.map_structure((lambda ta, out: ta.write(time, out)), outputs_ta, emit)
return ((time + 1), outputs_ta, next_state, next_inputs, next_finished)
res = control_flow_ops.while_loop(condition, body, loop_vars=[initial_time, initial_outputs_ta, initial_state, initial_inputs, initial_finished], parallel_iterations=parallel_iterations, swap_memory=swap_memory)
final_outputs_ta = res[1]
final_state = res[2]
final_outputs = nest.map_structure((lambda ta: ta.stack()), final_outputs_ta)
if (not output_time_major):
final_outputs = nest.map_structure(_transpose_batch_time, final_outputs)
return (final_outputs, final_state)
|
class BaseCopyingDataProvider(data_provider.DataProvider):
'Base class for CopyingDataProvider. This data provider reads two datasets\n in parallel, keeping them aligned. It notes where each target copies\n from the parallel source or the schema.\n\n Args:\n dataset1: The first dataset. An instance of the Dataset class.\n dataset2: The second dataset. An instance of the Dataset class.\n Can be None. If None, only `dataset1` is read.\n schemas: The schema locations. An instance of the Dataset\n class. Can be None.\n num_readers: The number of parallel readers to use.\n shuffle: Whether to shuffle the data sources and common queue when\n reading.\n num_epochs: The number of times each data source is read. If left as None,\n the data will be cycled through indefinitely.\n common_queue_capacity: The capacity of the common queue.\n common_queue_min: The minimum number of elements in the common queue after\n a dequeue.\n seed: The seed to use if shuffling.\n '
def __init__(self, dataset1, dataset2, schemas=None, shuffle=True, num_epochs=None, common_queue_capacity=4096, common_queue_min=1024, seed=None):
if (seed is None):
seed = np.random.randint(1000000000.0)
(_, data_source) = parallel_reader.parallel_read(dataset1.data_sources, reader_class=dataset1.reader, num_epochs=num_epochs, num_readers=1, shuffle=False, capacity=common_queue_capacity, min_after_dequeue=common_queue_min, seed=seed)
data_target = ''
if (dataset2 is not None):
(_, data_target) = parallel_reader.parallel_read(dataset2.data_sources, reader_class=dataset2.reader, num_epochs=num_epochs, num_readers=1, shuffle=False, capacity=common_queue_capacity, min_after_dequeue=common_queue_min, seed=seed)
data_schemas = ''
if (schemas is not None):
(_, data_schemas) = parallel_reader.parallel_read(schemas.data_sources, reader_class=schemas.reader, num_epochs=num_epochs, num_readers=1, shuffle=False, capacity=common_queue_capacity, min_after_dequeue=common_queue_min, seed=seed)
if shuffle:
shuffle_queue = tf.RandomShuffleQueue(capacity=common_queue_capacity, min_after_dequeue=common_queue_min, dtypes=[tf.string, tf.string, tf.string], seed=seed)
enqueue_ops = []
enqueue_ops.append(shuffle_queue.enqueue([data_source, data_target, data_schemas]))
tf.train.add_queue_runner(tf.train.QueueRunner(shuffle_queue, enqueue_ops))
(data_source, data_target, data_schemas) = shuffle_queue.dequeue()
items = dataset1.decoder.list_items()
tensors = dataset1.decoder.decode(data_source, items)
tensors_schema = [None]
if (schemas is not None):
items_schema = schemas.decoder.list_items()
tensors_schema = schemas.decoder.decode(data_schemas, items_schema)
if (dataset2 is not None):
target_and_copy_sources = self._target_and_copy_sources(data_target, tensors[0], tensors_schema)
items2 = dataset2.decoder.list_items()
tensors2 = dataset2.decoder.decode(target_and_copy_sources, items2)
items = (items + items2)
tensors = (tensors + tensors2)
if (schemas is not None):
items = (items + items_schema)
tensors = (tensors + tensors_schema)
super(BaseCopyingDataProvider, self).__init__(items_to_tensors=dict(zip(items, tensors)), num_samples=dataset1.num_samples)
def _target_and_copy_sources(self, data_target, source_tensors, schema_tensors):
raise NotImplementedError
|
def _make_copying_data_provider_base(data_sources_source, data_sources_schema, reader=tf.TextLineReader, num_samples=None, source_delimiter=' ', **kwargs):
'\n Prepare the Datasets that will be used to make the copying data provider.\n\n Args:\n data_sources_source: A list of data sources for the source text files.\n data_sources_schema: A list of data sources for the schema location text files.\n reader: A reader that can handle the source and schema files.\n num_samples: Optional, number of records in the dataset\n delimiter: Split tokens in the data on this delimiter. Defaults to space.\n kwargs: Additional arguments (shuffle, num_epochs, etc) that are passed\n to the data provider\n\n Returns:\n The Datasets for source and schema.\n'
decoder_source = split_tokens_decoder.SplitTokensDecoder(tokens_feature_name='source_tokens', length_feature_name='source_len', append_token='SEQUENCE_END', delimiter=source_delimiter)
dataset_source = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_source, reader=reader, decoder=decoder_source, num_samples=num_samples, items_to_descriptions={})
print('source schema', data_sources_schema)
dataset_schemas = None
if (data_sources_schema is not None):
decoder_schemas = split_tokens_decoder.SplitMaskDecoder(decoder_mask_feature_name='decoder_mask', delimiter=' ')
dataset_schemas = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_schema, reader=reader, decoder=decoder_schemas, num_samples=num_samples, items_to_descriptions={})
return (dataset_source, dataset_schemas)
|
def make_schema_copying_data_provider(data_sources_source, data_sources_target, data_sources_schema, reader=tf.TextLineReader, num_samples=None, source_delimiter=' ', target_delimiter=' ', **kwargs):
    '\n Builds a copying data provider for schema-only copying.\n Args:\n data_sources_source: A list of data sources for the source text\n files.\n data_sources_target: A list of data sources for the target text\n files.\n data_sources_schema: A list of data sources for the schema\n location text files.\n reader: A reader that can handle the source and schema files.\n num_samples: Optional, number of records in the dataset\n source_delimiter: Split tokens in the source data on this\n delimiter. Defaults to space.\n target_delimiter: Split tokens in the target data on this\n delimiter. Defaults to space.\n kwargs: Additional arguments (shuffle, num_epochs, etc) that are passed\n to the data provider\n\n Returns:\n A SchemaCopyingDataProvider.\n '
    (dataset_source, dataset_schemas) = _make_copying_data_provider_base(data_sources_source, data_sources_schema, reader=reader, num_samples=num_samples, source_delimiter=source_delimiter, **kwargs)
dataset_target = None
if (data_sources_target is not None):
decoder_target = copying_decoder.SchemaCopyingDecoder(tokens_feature_name='target_tokens', length_feature_name='target_len', prepend_token='SEQUENCE_START', append_token='SEQUENCE_END', delimiter=target_delimiter)
dataset_target = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_target, reader=reader, decoder=decoder_target, num_samples=num_samples, items_to_descriptions={})
return SchemaCopyingDataProvider(dataset1=dataset_source, dataset2=dataset_target, schemas=dataset_schemas, **kwargs)
|
def make_schema_and_word_copying_data_provider(data_sources_source, data_sources_target, data_sources_schema, reader=tf.TextLineReader, num_samples=None, source_delimiter=' ', target_delimiter=' ', **kwargs):
    '\n Builds a copying data provider for schema and word copying.\n Args:\n data_sources_source: A list of data sources for the source text\n files.\n data_sources_target: A list of data sources for the target text\n files.\n data_sources_schema: A list of data sources for the schema\n location text files.\n reader: A reader that can handle the source and schema files.\n num_samples: Optional, number of records in the dataset\n source_delimiter: Split tokens in the source data on this\n delimiter. Defaults to space.\n target_delimiter: Split tokens in the target data on this\n delimiter. Defaults to space.\n kwargs: Additional arguments (shuffle, num_epochs, etc) that are passed\n to the data provider\n\n Returns:\n A SchemaAndWordCopyingDataProvider.\n '
    (dataset_source, dataset_schemas) = _make_copying_data_provider_base(data_sources_source, data_sources_schema, reader=reader, num_samples=num_samples, source_delimiter=source_delimiter, **kwargs)
dataset_target = None
if (data_sources_target is not None):
decoder_target = copying_decoder.SchemaAndWordCopyingDecoder(tokens_feature_name='target_tokens', length_feature_name='target_len', prepend_token='SEQUENCE_START', append_token='SEQUENCE_END', delimiter=target_delimiter)
dataset_target = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_target, reader=reader, decoder=decoder_target, num_samples=num_samples, items_to_descriptions={})
return SchemaAndWordCopyingDataProvider(dataset1=dataset_source, dataset2=dataset_target, schemas=dataset_schemas, **kwargs)
|
def make_word_copying_data_provider(data_sources_source, data_sources_target, data_sources_schema=None, reader=tf.TextLineReader, num_samples=None, source_delimiter=' ', target_delimiter=' ', **kwargs):
    '\n Builds a copying data provider for word-only copying.\n Args:\n data_sources_source: A list of data sources for the source text\n files.\n data_sources_target: A list of data sources for the target text\n files.\n data_sources_schema: An optional list of data sources for the schema\n location text files.\n reader: A reader that can handle the source and schema files.\n num_samples: Optional, number of records in the dataset\n source_delimiter: Split tokens in the source data on this\n delimiter. Defaults to space.\n target_delimiter: Split tokens in the target data on this\n delimiter. Defaults to space.\n kwargs: Additional arguments (shuffle, num_epochs, etc) that are passed\n to the data provider\n\n Returns:\n A WordCopyingDataProvider.\n '
    (dataset_source, dataset_schemas) = _make_copying_data_provider_base(data_sources_source, data_sources_schema, reader=reader, num_samples=num_samples, source_delimiter=source_delimiter, **kwargs)
dataset_target = None
if (data_sources_target is not None):
decoder_target = copying_decoder.WordCopyingDecoder(tokens_feature_name='target_tokens', length_feature_name='target_len', prepend_token='SEQUENCE_START', append_token='SEQUENCE_END', delimiter=target_delimiter)
dataset_target = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_target, reader=reader, decoder=decoder_target, num_samples=num_samples, items_to_descriptions={})
return WordCopyingDataProvider(dataset1=dataset_source, dataset2=dataset_target, schemas=dataset_schemas, **kwargs)
|
class SchemaCopyingDataProvider(BaseCopyingDataProvider):
def _target_and_copy_sources(self, data_target, source_tensors, schema):
return [data_target, None, schema]
|
class WordCopyingDataProvider(BaseCopyingDataProvider):
def _target_and_copy_sources(self, data_target, source_tensors, schema):
return [data_target, source_tensors, None]
|
class SchemaAndWordCopyingDataProvider(BaseCopyingDataProvider):
def _target_and_copy_sources(self, data_target, source_tensors, schema):
return [data_target, source_tensors, schema]
|
class BaseCopyingDecoder(split_tokens_decoder.SplitTokensDecoder):
'Base class for a DataDecoder that splits a string tensor into individual\n tokens and marks those copied from the input sequence or the schema.\n Optionally prepends or appends special tokens.\n\n Args:\n delimiter: Delimiter to split on. Must be a single character.\n tokens_feature_name: A descriptive feature name for the token values\n length_feature_name: A descriptive feature name for the length value\n '
def decode(self, data, items):
'\n Args:\n data: List of [target_string, source_tokens_list, schema_location].\n items: A list of strings, each of which indicates a particular data\n type.\n\n Returns: A dictionary with a tensor for each item in items.\n '
if (not isinstance(data, list)):
raise ValueError(("'data' arg to decode should be a three-item list, but is a %s" % str(type(data))))
if (len(data) != 3):
raise ValueError(("'data' arg to decode should be a three-item list, but is a list of length %d" % len(data)))
decoded_items = {}
tokens = tf.string_split([data[0]], delimiter=self.delimiter).values
(tokens, indices) = self._mark_all_copies(tokens, data)
if (self.prepend_token is not None):
(tokens, indices) = self._prepend(tokens, indices)
if (self.append_token is not None):
(tokens, indices) = self._append(tokens, indices)
decoded_items[self.length_feature_name] = tf.size(tokens)
decoded_items[self.tokens_feature_name] = tokens
decoded_items['indices'] = indices
return decoded_items
def _prepend(self, tokens, indices):
tokens = tf.concat([[self.prepend_token], tokens], 0, name='prepend_to_tokens')
return (tokens, indices)
def _append(self, tokens, indices):
tokens = tf.concat([tokens, [self.append_token]], 0, name='append_to_tokens')
return (tokens, indices)
def _mark_all_copies(self, tokens, data):
raise NotImplementedError
def _mark_copies(self, tokenized, copy_source, copy_token):
'Replace any token in tokenized that can be copied from copy_source\n with the copy_token, and build a tensor with the indices of the copied\n item in the source.\n For instance, could be used with the query\n SELECT NUM_CREDITS FROM COURSE WHERE\n DEPARTMENT = " EECS " AND NUMBER = 280 ;\n and the question\n "Who teaches EECS 280 ?"\n to generate\n SELECT NUM_CREDITS FROM COURSE WHERE\n DEPARTMENT = " COPY_WORD " AND NUMBER = COPY_WORD ;\n and the indices\n [0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0]\n Args:\n tokenized: a vector of tokens from the target string\n copy_source: a vector of tokens from the source string,\n or a vector of tokens from the schema.\n copy_token: the token with which to replace copies. E.g., "COPY_WORD"\n or "COPY_SCHEMA"\n Returns: tokenized with copied tokens replaced by the copy_token, and\n a vector of copying indices.\n '
def copy_fn(tokens, copy_source):
tokens_in_copy_source = np.in1d(tokens.ravel(), copy_source).reshape(tokens.shape)
indices = [np.where((copy_source == t))[0] for t in tokens]
indices = [(t[0] if (len(t) > 0) else 0) for t in indices]
indices = np.asarray(indices)
return (tokens_in_copy_source, indices)
tf_copy_result = tf.py_func(copy_fn, [tokenized, copy_source], [tf.bool, tf.int64], name=('identify_copies_%s' % copy_token))
copy_indices = tf_copy_result[1]
copy_tokens = tf.fill(tf.shape(tokenized), copy_token, name=('fill_with_%s' % copy_token))
tokenized = tf.where(tf_copy_result[0], copy_tokens, tokenized, name=('mark_copied_%s_tokens' % copy_token))
return (tokenized, copy_indices)
def list_items(self):
return [self.tokens_feature_name, self.length_feature_name]
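# A plain-numpy sketch of the copy-marking step performed by copy_fn inside
# _mark_copies above, using the question/query pair from its docstring. In the
# real pipeline the same computation runs inside tf.py_func on string tensors.
import numpy as np

def _mark_copies_numpy(tokens, copy_source, copy_token):
    tokens = np.asarray(tokens)
    copy_source = np.asarray(copy_source)
    # Which target tokens also appear in the copy source.
    in_source = np.in1d(tokens.ravel(), copy_source).reshape(tokens.shape)
    # Index of the first matching source position, or 0 when there is no match.
    indices = [np.where(copy_source == t)[0] for t in tokens]
    indices = np.asarray([i[0] if len(i) > 0 else 0 for i in indices])
    return np.where(in_source, copy_token, tokens), indices

# _mark_copies_numpy(
#     'SELECT NUM_CREDITS FROM COURSE WHERE DEPARTMENT = " EECS " AND NUMBER = 280 ;'.split(),
#     'Who teaches EECS 280 ?'.split(),
#     'COPY_WORD')
# replaces EECS and 280 with COPY_WORD and yields copy indices 2 and 3 at those
# positions, matching the example in the _mark_copies docstring.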
|
class SchemaAndWordCopyingDecoder(BaseCopyingDecoder):
'\n CopyingDecoder that marks where the output sequence copies from the input\n sequence and where it copies from the schema.\n\n Args:\n delimiter: Delimiter to split on. Must be a single character.\n tokens_feature_name: A descriptive feature name for the token values\n length_feature_name: A descriptive feature name for the length value\n source_copy_feature_name: A descriptive feature name for the indices\n representing where in the input sequence a word is copied from.\n schema_copy_feature_name: A descriptive feature name for the indices\n representing where in the schema a token is copied from.\n prepend_token: Optional token to prepend to output.\n append_token: Optional token to append to output.\n '
def __init__(self, delimiter=' ', tokens_feature_name='tokens', length_feature_name='length', source_copy_feature_name='source_copy_indices', schema_copy_feature_name='schema_copy_indices', prepend_token=None, append_token=None):
super(SchemaAndWordCopyingDecoder, self).__init__(delimiter=delimiter, tokens_feature_name=tokens_feature_name, length_feature_name=length_feature_name, prepend_token=prepend_token, append_token=append_token)
self.source_copy_feature_name = source_copy_feature_name
self.schema_copy_feature_name = schema_copy_feature_name
schema_tables = graph_utils.get_dict_from_collection('schema_tables')
self.schema_lookup_table = schema_tables['schema_file_lookup_table']
self.schema_strings_table = schema_tables['all_schema_strings']
def decode(self, data, items):
'\n Args:\n data: List of [target_string, source_tokens_list, schema_tokens_list].\n items: A list of strings, each of which indicates a particular data type.\n Returns: A tensor for each item in items.\n '
decoded_items = super(SchemaAndWordCopyingDecoder, self).decode(data, items)
indices = decoded_items.pop('indices')
schema_copies_indices = indices[1]
input_copies_indices = indices[0]
decoded_items[self.schema_copy_feature_name] = schema_copies_indices
decoded_items[self.source_copy_feature_name] = input_copies_indices
return [decoded_items[_] for _ in items]
def _prepend(self, tokens, indices):
(tokens, _) = super(SchemaAndWordCopyingDecoder, self)._prepend(tokens, indices)
schema_copies_indices = indices[1]
input_copies_indices = indices[0]
input_copies_indices = tf.concat([[0], input_copies_indices], 0, name='prepend_to_input_copies_indices')
schema_copies_indices = tf.concat([[0], schema_copies_indices], 0, name='prepend_to_schema_copies_indices')
return (tokens, [input_copies_indices, schema_copies_indices])
def _append(self, tokens, indices):
(tokens, _) = super(SchemaAndWordCopyingDecoder, self)._append(tokens, indices)
schema_copies_indices = indices[1]
input_copies_indices = indices[0]
input_copies_indices = tf.concat([input_copies_indices, [0]], 0, name='append_to_input_copies_indices')
schema_copies_indices = tf.concat([schema_copies_indices, [0]], 0, name='append_to_schema_copies_indices')
return (tokens, [input_copies_indices, schema_copies_indices])
def _mark_all_copies(self, tokens, data):
words = data[1]
(tokens, input_copies_indices) = self._mark_copies(tokens, words, 'COPY_WORD')
schema_location = data[2][0]
schema_id = self.schema_lookup_table.lookup(schema_location)
schema_string = self.schema_strings_table.lookup(schema_id)
schema = tf.string_split(schema_string, delimiter=' ').values
(tokens, schema_copies_indices) = self._mark_copies(tokens, schema, 'COPY_SCHEMA')
return (tokens, [input_copies_indices, schema_copies_indices])
def list_items(self):
items = super(SchemaAndWordCopyingDecoder, self).list_items()
items += [self.schema_copy_feature_name, self.source_copy_feature_name]
return items
|
class SchemaCopyingDecoder(BaseCopyingDecoder):
'\n CopyingDecoder that marks where the output sequence copies from the\n schema.\n\n Args:\n delimiter: Delimiter to split on. Must be a single character.\n tokens_feature_name: A descriptive feature name for the token values\n length_feature_name: A descriptive feature name for the length value\n schema_copy_feature_name: A descriptive feature name for the indices\n representing where in the schema a token is copied from.\n prepend_token: Optional token to prepend to output.\n append_token: Optional token to append to output.\n '
def __init__(self, delimiter=' ', tokens_feature_name='tokens', length_feature_name='length', schema_copy_feature_name='schema_copy_indices', prepend_token=None, append_token=None):
super(SchemaCopyingDecoder, self).__init__(delimiter=delimiter, tokens_feature_name=tokens_feature_name, length_feature_name=length_feature_name, prepend_token=prepend_token, append_token=append_token)
self.schema_copy_feature_name = schema_copy_feature_name
schema_tables = graph_utils.get_dict_from_collection('schema_tables')
self.schema_lookup_table = schema_tables['schema_file_lookup_table']
self.schema_strings_table = schema_tables['all_schema_strings']
def decode(self, data, items):
'\n Args:\n data: List of [target_string, None, schema_tokens_list].\n items: A list of strings, each of which indicates a particular data type.\n Returns: A tensor for each item in items.\n '
decoded_items = super(SchemaCopyingDecoder, self).decode(data, items)
indices = decoded_items.pop('indices')
schema_copies_indices = indices[0]
decoded_items[self.schema_copy_feature_name] = schema_copies_indices
return [decoded_items[_] for _ in items]
def _prepend(self, tokens, indices):
(tokens, _) = super(SchemaCopyingDecoder, self)._prepend(tokens, indices)
schema_copies_indices = indices[0]
schema_copies_indices = tf.concat([[0], schema_copies_indices], 0, name='prepend_to_schema_copies_indices')
return (tokens, [schema_copies_indices])
def _append(self, tokens, indices):
(tokens, _) = super(SchemaCopyingDecoder, self)._append(tokens, indices)
schema_copies_indices = indices[0]
schema_copies_indices = tf.concat([schema_copies_indices, [0]], 0, name='append_to_schema_copies_indices')
return (tokens, [schema_copies_indices])
def _mark_all_copies(self, tokens, data):
schema_location = data[2][0]
schema_id = self.schema_lookup_table.lookup(schema_location)
schema_string = self.schema_strings_table.lookup(schema_id)
schema = tf.string_split(schema_string, delimiter=' ').values
(tokens, schema_copies_indices) = self._mark_copies(tokens, schema, 'COPY_SCHEMA')
return (tokens, [schema_copies_indices])
def list_items(self):
items = super(SchemaCopyingDecoder, self).list_items()
items += [self.schema_copy_feature_name]
return items
|
class WordCopyingDecoder(BaseCopyingDecoder):
'\n CopyingDecoder that marks where the output sequence copies from the input\n sequence.\n\n Args:\n delimiter: Delimiter to split on. Must be a single character.\n tokens_feature_name: A descriptive feature name for the token values\n length_feature_name: A descriptive feature name for the length value\n source_copy_feature_name: A descriptive feature name for the indices\n representing where in the input sequence a word is copied from.\n prepend_token: Optional token to prepend to output.\n append_token: Optional token to append to output.\n '
def __init__(self, delimiter=' ', tokens_feature_name='tokens', length_feature_name='length', source_copy_feature_name='source_copy_indices', prepend_token=None, append_token=None):
super(WordCopyingDecoder, self).__init__(delimiter=delimiter, tokens_feature_name=tokens_feature_name, length_feature_name=length_feature_name, prepend_token=prepend_token, append_token=append_token)
self.source_copy_feature_name = source_copy_feature_name
def decode(self, data, items):
'\n Args:\n data: List of [target_string, source_tokens_list, None].\n items: A list of strings, each of which indicates a particular data type.\n Returns: A tensor for each item in items.\n '
decoded_items = super(WordCopyingDecoder, self).decode(data, items)
indices = decoded_items.pop('indices')
input_copies_indices = indices[0]
decoded_items[self.source_copy_feature_name] = input_copies_indices
print('decode', self.source_copy_feature_name)
return [decoded_items[_] for _ in items]
def _prepend(self, tokens, indices):
(tokens, _) = super(WordCopyingDecoder, self)._prepend(tokens, indices)
input_copies_indices = indices[0]
input_copies_indices = tf.concat([[0], input_copies_indices], 0, name='prepend_to_input_copies_indices')
return (tokens, [input_copies_indices])
def _append(self, tokens, indices):
(tokens, _) = super(WordCopyingDecoder, self)._append(tokens, indices)
input_copies_indices = indices[0]
input_copies_indices = tf.concat([input_copies_indices, [0]], 0, name='append_to_input_copies_indices')
return (tokens, [input_copies_indices])
def _mark_all_copies(self, tokens, data):
words = data[1]
(tokens, input_copies_indices) = self._mark_copies(tokens, words, 'COPY_WORD')
return (tokens, [input_copies_indices])
def list_items(self):
items = super(WordCopyingDecoder, self).list_items()
items += [self.source_copy_feature_name]
return items
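# A small construction sketch for the word-copying decoder; the feature names
# mirror those used by make_word_copying_data_provider and are otherwise
# arbitrary. Purely illustrative, not part of the original API.
def _example_word_copying_decoder():
    decoder = WordCopyingDecoder(
        tokens_feature_name='target_tokens',
        length_feature_name='target_len',
        prepend_token='SEQUENCE_START',
        append_token='SEQUENCE_END')
    # list_items() should report
    # ['target_tokens', 'target_len', 'source_copy_indices'] for this setup.
    return decoder.list_items()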
|
def make_input_pipeline_from_def(def_dict, mode, **kwargs):
'Creates an InputPipeline object from a dictionary definition.\n\n Args:\n def_dict: A dictionary defining the input pipeline.\n It must have "class" and "params" that correspond to the class\n name and constructor parameters of an InputPipeline, respectively.\n mode: A value in tf.contrib.learn.ModeKeys\n\n Returns:\n A new InputPipeline object\n '
if (not ('class' in def_dict)):
raise ValueError('Input Pipeline definition must have a class property.')
class_ = def_dict['class']
if (not hasattr(sys.modules[__name__], class_)):
raise ValueError('Invalid Input Pipeline class: {}'.format(class_))
pipeline_class = getattr(sys.modules[__name__], class_)
params = {}
if ('params' in def_dict):
params.update(def_dict['params'])
params.update(kwargs)
return pipeline_class(params=params, mode=mode)
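# An illustrative pipeline definition of the kind make_input_pipeline_from_def
# expects (hypothetical file names; assumes the tf.contrib.learn ModeKeys used
# elsewhere in this codebase, and the ParallelTextInputPipeline defined below):
def _example_input_pipeline_from_def():
    def_dict = {
        'class': 'ParallelTextInputPipeline',
        'params': {
            'source_files': ['train_sources.txt'],
            'target_files': ['train_targets.txt'],
        },
    }
    return make_input_pipeline_from_def(def_dict, tf.contrib.learn.ModeKeys.TRAIN)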
|
@six.add_metaclass(abc.ABCMeta)
class InputPipeline(Configurable):
'Abstract InputPipeline class. All input pipelines must inherit from this.\n An InputPipeline defines how data is read, parsed, and separated into\n features and labels.\n\n Params:\n shuffle: If true, shuffle the data.\n num_epochs: Number of times to iterate through the dataset. If None,\n iterate forever.\n '
def __init__(self, params, mode):
Configurable.__init__(self, params, mode)
@staticmethod
def default_params():
return {'shuffle': True, 'num_epochs': None}
def make_data_provider(self, **kwargs):
'Creates DataProvider instance for this input pipeline. Additional\n keyword arguments are passed to the DataProvider.\n '
raise NotImplementedError('Not implemented.')
@property
def feature_keys(self):
'Defines the features that this input pipeline provides. Returns\n a set of strings.\n '
return set()
@property
def label_keys(self):
'Defines the labels that this input pipeline provides. Returns\n a set of strings.\n '
return set()
@staticmethod
def read_from_data_provider(data_provider):
'Utility function to read all available items from a DataProvider.\n '
item_values = data_provider.get(list(data_provider.list_items()))
items_dict = dict(zip(data_provider.list_items(), item_values))
return items_dict
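# A sketch of how a training loop might split the provider output into features
# and labels using the keys a pipeline declares (hypothetical glue code, not part
# of the original API):
def _example_split_features_and_labels(pipeline, data_provider):
    items = InputPipeline.read_from_data_provider(data_provider)
    features = {k: v for k, v in items.items() if k in pipeline.feature_keys}
    labels = {k: v for k, v in items.items() if k in pipeline.label_keys}
    return features, labels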
|
class ParallelTextInputPipeline(InputPipeline):
'An input pipeline that reads two parallel (line-by-line aligned) text\n files.\n\n Params:\n source_files: An array of file names for the source data.\n target_files: An array of file names for the target data. These must\n be aligned to the `source_files`.\n source_delimiter: A character to split the source text on. Defaults\n to " " (space). For character-level training this can be set to the\n empty string.\n target_delimiter: Same as `source_delimiter` but for the target text.\n '
@staticmethod
def default_params():
params = InputPipeline.default_params()
params.update({'source_files': [], 'target_files': [], 'source_delimiter': ' ', 'target_delimiter': ' ', 'build_schema_map_table': False, 'build_schema_text_table': False})
return params
def make_data_provider(self, **kwargs):
decoder_source = split_tokens_decoder.SplitTokensDecoder(tokens_feature_name='source_tokens', length_feature_name='source_len', append_token='SEQUENCE_END', delimiter=self.params['source_delimiter'])
dataset_source = tf.contrib.slim.dataset.Dataset(data_sources=self.params['source_files'], reader=tf.TextLineReader, decoder=decoder_source, num_samples=None, items_to_descriptions={})
dataset_target = None
if (len(self.params['target_files']) > 0):
decoder_target = split_tokens_decoder.SplitTokensDecoder(tokens_feature_name='target_tokens', length_feature_name='target_len', prepend_token='SEQUENCE_START', append_token='SEQUENCE_END', delimiter=self.params['target_delimiter'])
dataset_target = tf.contrib.slim.dataset.Dataset(data_sources=self.params['target_files'], reader=tf.TextLineReader, decoder=decoder_target, num_samples=None, items_to_descriptions={})
return parallel_data_provider.ParallelDataProvider(dataset1=dataset_source, dataset2=dataset_target, shuffle=self.params['shuffle'], num_epochs=self.params['num_epochs'], **kwargs)
@property
def feature_keys(self):
return set(['source_tokens', 'source_len'])
@property
def label_keys(self):
return set(['target_tokens', 'target_len'])
|
class ParallelTextAndMaskInputPipeline(ParallelTextInputPipeline):
@staticmethod
def default_params():
params = ParallelTextInputPipeline.default_params()
params.update({'decoder_mask_files': []})
return params
def make_data_provider(self, **kwargs):
target_files = self.params['target_files']
if (not target_files):
target_files = None
print('make mask data provider args:', self.params['source_files'], target_files, self.params['decoder_mask_files'], self.params['shuffle'], self.params['num_epochs'], self.params['source_delimiter'], self.params['target_delimiter'])
if (kwargs is not None):
for (key, value) in kwargs.items():
print(key, value)
print('source_files', self.params['source_files'])
return triple_data_provider.make_triple_data_provider(self.params['source_files'], target_files, self.params['decoder_mask_files'], shuffle=self.params['shuffle'], num_epochs=self.params['num_epochs'], source_delimiter=self.params['source_delimiter'], target_delimiter=self.params['target_delimiter'], **kwargs)
@property
def feature_keys(self):
return set(['source_tokens', 'source_len', 'decoder_mask'])
|
class ParallelTextAndSchemaInputPipeline(ParallelTextInputPipeline):
'\n An input pipeline that reads three parallel (line-by-line aligned) text files:\n a source, a target, and a schema location.\n\n Params:\n source_files: An array of file names for the source data.\n target_files: An array of file names for the target data. These must\n be aligned to the `source_files`.\n schema_loc_files: An array of file names for the schema locations. Each\n file includes one schema location per line, aligned to the source_files.\n source_delimiter: A character to split the source text on. Defaults\n to " " (space). For character-level training this can be set to the\n empty string.\n target_delimiter: Same as `source_delimiter` but for the target text.\n '
@staticmethod
def default_params():
params = ParallelTextInputPipeline.default_params()
params.update({'schema_loc_files': []})
return params
def _build_schema_lookup_tables(self):
schema_loc_files = self.params['schema_loc_files']
all_schema_locations = set()
for loc_file in schema_loc_files:
with open(loc_file, 'r') as f:
locations = [l.strip() for l in f.readlines()]
all_schema_locations.update(locations)
all_schema_locations = list(all_schema_locations)
schema_file_lookup_table = tf.contrib.lookup.index_table_from_tensor(mapping=all_schema_locations, num_oov_buckets=0, default_value=(- 1))
schema_embeddings_matrices = []
schema_lengths = []
if self.params['build_schema_text_table']:
all_schema_strings = []
if self.params['build_schema_map_table']:
schema_map_matrices = []
schema_map_lengths = []
def load_npy(matrix_list, length_list, file_location, fname):
npy_file = os.path.join(file_location, fname)
matrix_np = np.load(npy_file)
matrix_list.append(matrix_np)
length = matrix_np.shape[0]
length_list.append(length)
for schema_location in all_schema_locations:
print('current_schema_location', schema_location)
load_npy(schema_embeddings_matrices, schema_lengths, schema_location, 'schema_embeddings.npy')
if self.params['build_schema_map_table']:
load_npy(schema_map_matrices, schema_map_lengths, schema_location, 'schema_map.npy')
if self.params['build_schema_text_table']:
schema_csv_file = os.path.join(schema_location, 'schema.csv')
schema_string = self.get_schema_strings(schema_csv_file)
all_schema_strings.append(schema_string)
max_emb_len = max(schema_lengths)
schema_lengths = tf.constant(schema_lengths)
def pad_to_size(matrix, length):
if (matrix.shape[0] == length):
return matrix
padding_size = (length - matrix.shape[0])
padded = np.pad(matrix, pad_width=((0, padding_size), (0, 0)), mode='constant', constant_values=0)
return padded
schema_embeddings_matrices = [pad_to_size(m, max_emb_len) for m in schema_embeddings_matrices]
all_schema_embeddings = tf.convert_to_tensor(np.asarray(schema_embeddings_matrices), dtype=tf.float32)
tables_dict = {'schema_file_lookup_table': schema_file_lookup_table, 'all_schema_embeddings': all_schema_embeddings, 'schema_lengths': schema_lengths}
if self.params['build_schema_text_table']:
schema_strings_tbl = tf.contrib.lookup.index_to_string_table_from_tensor(all_schema_strings, name='schema_strings_lookup_table')
tables_dict['all_schema_strings'] = schema_strings_tbl
if self.params['build_schema_map_table']:
max_map_len = max(schema_map_lengths)
schema_map_lengths = tf.constant(schema_map_lengths)
schema_map_matrices = [pad_to_size(m, max_map_len) for m in schema_map_matrices]
all_schema_maps = tf.convert_to_tensor(np.asarray(schema_map_matrices), dtype=tf.float32)
tables_dict['all_schema_maps'] = all_schema_maps
tables_dict['schema_map_lengths'] = schema_map_lengths
graph_utils.add_dict_to_collection(tables_dict, 'schema_tables')
def make_data_provider(self, **kwargs):
self._build_schema_lookup_tables()
target_files = self.params['target_files']
if (not target_files):
target_files = None
print('make data provider args:', self.params['source_files'], target_files, self.params['schema_loc_files'], self.params['shuffle'], self.params['num_epochs'], self.params['source_delimiter'], self.params['target_delimiter'])
if (kwargs is not None):
for (key, value) in kwargs.items():
print(key, value)
print('source_files', self.params['source_files'])
return triple_data_provider.make_triple_data_provider(self.params['source_files'], target_files, self.params['schema_loc_files'], shuffle=self.params['shuffle'], num_epochs=self.params['num_epochs'], source_delimiter=self.params['source_delimiter'], target_delimiter=self.params['target_delimiter'], **kwargs)
@property
def feature_keys(self):
return set(['source_tokens', 'source_len', 'schema_loc'])
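# A standalone numpy sketch of the padding done by the nested pad_to_size helper
# in _build_schema_lookup_tables above: each per-schema matrix is zero-padded
# along axis 0 to the longest schema length before the matrices are stacked.
import numpy as np

def _pad_to_size_numpy(matrix, length):
    if matrix.shape[0] == length:
        return matrix
    return np.pad(matrix,
                  pad_width=((0, length - matrix.shape[0]), (0, 0)),
                  mode='constant', constant_values=0)

# e.g. _pad_to_size_numpy(np.ones((3, 4)), 5) has shape (5, 4); the last two
# rows are all zeros.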
|
class ParallelTextAndSchemaMapInputPipeline(ParallelTextAndSchemaInputPipeline):
'\n An input pipeline that reads three parallel (line-by-line aligned) text files:\n a source, a target, and a schema location. Expects both schema embeddings and\n schema map at the schema locations.\n\n Params:\n source_files: An array of file names for the source data.\n target_files: An array of file names for the target data. These must\n be aligned to the `source_files`.\n schema_loc_files: An array of file names for the schema locations. Each\n file includes one schema location per line, aligned to the source_files.\n source_delimiter: A character to split the source text on. Defaults\n to " " (space). For character-level training this can be set to the\n empty string.\n target_delimiter: Same as `source_delimiter` but for the target text.\n '
@staticmethod
def default_params():
params = ParallelTextAndSchemaInputPipeline.default_params()
params.update({'build_schema_map_table': True})
return params
@property
def feature_keys(self):
return set(['source_tokens', 'source_len', 'decoder_mask'])
|
class ParallelTextAndMaskCopyingPipeline(ParallelTextAndMaskInputPipeline):
def make_data_provider(self, **kwargs):
target_files = self.params['target_files']
if (not target_files):
target_files = None
return self._get_copying_data_provider(target_files, **kwargs)
def _get_copying_data_provider(self, target_files, **kwargs):
return copying_data_provider.make_word_copying_data_provider(self.params['source_files'], target_files, self.params['decoder_mask_files'], num_epochs=self.params['num_epochs'], shuffle=self.params['shuffle'], source_delimiter=self.params['source_delimiter'], target_delimiter=self.params['target_delimiter'], **kwargs)
def _get_copying_decoder(self, tokens_feature_name, length_feature_name, prepend_token, append_token, delimiter):
return copying_decoder.WordCopyingDecoder(tokens_feature_name=tokens_feature_name, length_feature_name=length_feature_name, prepend_token=prepend_token, append_token=append_token, delimiter=delimiter)
@property
def feature_keys(self):
return set(['source_tokens', 'source_len', 'decoder_mask'])
@property
def label_keys(self):
return set(['target_tokens', 'target_len', 'source_copy_indices'])
|
class BaseParallelCopyingPipeline(ParallelTextAndSchemaInputPipeline):
'A base class for copying input pipelines that read three parallel\n (line-by-line aligned) text files and identify tokens copied from\n the schema or source.\n\n Params:\n source_files: An array of file names for the source data.\n target_files: An array of file names for the target data. These must\n be aligned to the `source_files`.\n schema_loc_files: An array of file names for the schema locations. Each\n file includes one schema location per line, aligned to the source_files.\n source_delimiter: A character to split the source text on. Defaults\n to " " (space). For character-level training this can be set to the\n empty string.\n target_delimiter: Same as `source_delimiter` but for the target text.\n '
def make_data_provider(self, **kwargs):
self._build_schema_lookup_tables()
target_files = self.params['target_files']
if (not target_files):
target_files = None
return self._get_copying_data_provider(target_files, **kwargs)
def _get_copying_data_provider(self, target_files, **kwargs):
raise NotImplementedError
def _get_copying_decoder(self, tokens_feature_name, length_feature_name, prepend_token, append_token, delimiter):
raise NotImplementedError
@property
def feature_keys(self):
return set(['source_tokens', 'source_len', 'schema_loc'])
@property
def label_keys(self):
return set(['target_tokens', 'target_len'])
|
class ParallelSchemaCopyingPipeline(BaseParallelCopyingPipeline):
'A copying input pipeline that reads two parallel (line-by-line aligned)\n text files and a schema. It identifies tokens copied from the schema.\n\n Params:\n source_files: An array of file names for the source data.\n target_files: An array of file names for the target data. These must\n be aligned to the `source_files`.\n source_delimiter: A character to split the source text on. Defaults\n to " " (space). For character-level training this can be set to the\n empty string.\n target_delimiter: Same as `source_delimiter` but for the target text.\n '
@staticmethod
def default_params():
params = BaseParallelCopyingPipeline.default_params()
params.update({'build_schema_text_table': True})
return params
def _get_copying_data_provider(self, target_files, **kwargs):
return copying_data_provider.make_schema_copying_data_provider(self.params['source_files'], target_files, self.params['schema_loc_files'], num_epochs=self.params['num_epochs'], shuffle=self.params['shuffle'], source_delimiter=self.params['source_delimiter'], target_delimiter=self.params['target_delimiter'], **kwargs)
@property
def label_keys(self):
keys = super(ParallelSchemaCopyingPipeline, self).label_keys
keys.update({'schema_copy_indices'})
return keys
def get_schema_strings(self, schema_filename):
schema_strings = []
with open(schema_filename, 'r') as csvfile:
reader = csv.reader(csvfile)
is_header_row = True
for row in reader:
if is_header_row:
is_header_row = False
continue
table_name = row[0].strip()
field_name = row[1].strip()
if (len(field_name) == 0):
schema_strings.append(table_name)
else:
schema_strings.append(((table_name + ',') + field_name))
return ' '.join(schema_strings)
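# For illustration, get_schema_strings applied to a hypothetical schema.csv with
# a header row, table names in column 0, and an optional field name in column 1
# behaves like this in-memory sketch (Python 3):
import csv
import io

def _example_schema_strings():
    csv_text = 'table_name,field_name\nCOURSE,\nCOURSE,NUMBER\nCOURSE,DEPARTMENT\n'
    rows = list(csv.reader(io.StringIO(csv_text)))[1:]  # skip the header row
    strings = [row[0] if not row[1] else row[0] + ',' + row[1] for row in rows]
    return ' '.join(strings)  # 'COURSE COURSE,NUMBER COURSE,DEPARTMENT'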
|
class ParallelTextAndSchemaCopyingPipeline(ParallelSchemaCopyingPipeline):
'A copying input pipeline that reads two parallel (line-by-line aligned)\n text files and a schema. It identifies tokens copied from both the schema\n and source.\n\n Params:\n source_files: An array of file names for the source data.\n target_files: An array of file names for the target data. These must\n be aligned to the `source_files`.\n schema_loc_files: An array of file names for the schema location directories. Each directory must include a schema_embeddings.npy file and a schema.csv file. The latter should have table name in the first column and field name in the second.\n source_delimiter: A character to split the source text on. Defaults\n to " " (space). For character-level training this can be set to the\n empty string.\n target_delimiter: Same as `source_delimiter` but for the target text.\n '
def _get_copying_decoder(self, tokens_feature_name, length_feature_name, prepend_token, append_token, delimiter):
return copying_decoder.SchemaAndWordCopyingDecoder(tokens_feature_name=tokens_feature_name, length_feature_name=length_feature_name, prepend_token=prepend_token, append_token=append_token, delimiter=delimiter)
def _get_copying_data_provider(self, target_files, **kwargs):
return copying_data_provider.make_schema_and_word_copying_data_provider(self.params['source_files'], target_files, self.params['schema_loc_files'], num_epochs=self.params['num_epochs'], shuffle=self.params['shuffle'], source_delimiter=self.params['source_delimiter'], target_delimiter=self.params['target_delimiter'], **kwargs)
@property
def label_keys(self):
keys = super(ParallelTextAndSchemaCopyingPipeline, self).label_keys
keys.update({'source_copy_indices'})
return keys
|
class ParallelTextCopyingPipeline(BaseParallelCopyingPipeline):
'A copying input pipeline that reads two parallel (line-by-line aligned)\n text files. It identifies tokens copied from the input sequence to the\n output sequence.\n\n Params:\n source_files: An array of file names for the source data.\n target_files: An array of file names for the target data. These must\n be aligned to the `source_files`.\n source_delimiter: A character to split the source text on. Defaults\n to " " (space). For character-level training this can be set to the\n empty string.\n target_delimiter: Same as `source_delimiter` but for the target text.\n '
def _get_copying_decoder(self, tokens_feature_name, length_feature_name, prepend_token, append_token, delimiter):
return copying_decoder.WordCopyingDecoder(tokens_feature_name=tokens_feature_name, length_feature_name=length_feature_name, prepend_token=prepend_token, append_token=append_token, delimiter=delimiter)
def _get_copying_data_provider(self, target_files, **kwargs):
return copying_data_provider.make_word_copying_data_provider(self.params['source_files'], target_files, self.params['schema_loc_files'], num_epochs=self.params['num_epochs'], shuffle=self.params['shuffle'], source_delimiter=self.params['source_delimiter'], target_delimiter=self.params['target_delimiter'], **kwargs)
@property
def label_keys(self):
keys = super(ParallelTextCopyingPipeline, self).label_keys
keys.update({'source_copy_indices'})
return keys
|
class TFRecordInputPipeline(InputPipeline):
'An input pipeline that reads TFRecords containing both source\n and target sequences.\n\n Params:\n files: An array of file names to read from.\n source_field: The TFRecord feature field containing the source text.\n target_field: The TFRecord feature field containing the target text.\n source_delimiter: A character to split the source text on. Defaults\n to " " (space). For character-level training this can be set to the\n empty string.\n target_delimiter: Same as `source_delimiter` but for the target text.\n '
@staticmethod
def default_params():
params = InputPipeline.default_params()
params.update({'files': [], 'source_field': 'source', 'target_field': 'target', 'source_delimiter': ' ', 'target_delimiter': ' '})
return params
def make_data_provider(self, **kwargs):
splitter_source = split_tokens_decoder.SplitTokensDecoder(tokens_feature_name='source_tokens', length_feature_name='source_len', append_token='SEQUENCE_END', delimiter=self.params['source_delimiter'])
splitter_target = split_tokens_decoder.SplitTokensDecoder(tokens_feature_name='target_tokens', length_feature_name='target_len', prepend_token='SEQUENCE_START', append_token='SEQUENCE_END', delimiter=self.params['target_delimiter'])
keys_to_features = {self.params['source_field']: tf.FixedLenFeature((), tf.string), self.params['target_field']: tf.FixedLenFeature((), tf.string, default_value='')}
items_to_handlers = {}
items_to_handlers['source_tokens'] = tfexample_decoder.ItemHandlerCallback(keys=[self.params['source_field']], func=(lambda dict: splitter_source.decode(dict[self.params['source_field']], ['source_tokens'])[0]))
items_to_handlers['source_len'] = tfexample_decoder.ItemHandlerCallback(keys=[self.params['source_field']], func=(lambda dict: splitter_source.decode(dict[self.params['source_field']], ['source_len'])[0]))
items_to_handlers['target_tokens'] = tfexample_decoder.ItemHandlerCallback(keys=[self.params['target_field']], func=(lambda dict: splitter_target.decode(dict[self.params['target_field']], ['target_tokens'])[0]))
items_to_handlers['target_len'] = tfexample_decoder.ItemHandlerCallback(keys=[self.params['target_field']], func=(lambda dict: splitter_target.decode(dict[self.params['target_field']], ['target_len'])[0]))
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)
dataset = tf.contrib.slim.dataset.Dataset(data_sources=self.params['files'], reader=tf.TFRecordReader, decoder=decoder, num_samples=None, items_to_descriptions={})
return tf.contrib.slim.dataset_data_provider.DatasetDataProvider(dataset=dataset, shuffle=self.params['shuffle'], num_epochs=self.params['num_epochs'], **kwargs)
@property
def feature_keys(self):
return set(['source_tokens', 'source_len'])
@property
def label_keys(self):
return set(['target_tokens', 'target_len'])
|
class ImageCaptioningInputPipeline(InputPipeline):
'An input pipeline that reads TFRecords containing images and their\n tokenized captions, as used for image captioning.\n\n Params:\n files: An array of file names to read from.\n image_field: The TFRecord feature field containing the image data.\n image_format: The format of the encoded image, e.g. "jpg".\n caption_ids_field: The TFRecord feature field containing the caption\n token ids.\n caption_tokens_field: The TFRecord feature field containing the caption\n tokens.\n '
@staticmethod
def default_params():
params = InputPipeline.default_params()
params.update({'files': [], 'image_field': 'image/data', 'image_format': 'jpg', 'caption_ids_field': 'image/caption_ids', 'caption_tokens_field': 'image/caption'})
return params
def make_data_provider(self, **kwargs):
context_keys_to_features = {self.params['image_field']: tf.FixedLenFeature([], dtype=tf.string), 'image/format': tf.FixedLenFeature([], dtype=tf.string, default_value=self.params['image_format'])}
sequence_keys_to_features = {self.params['caption_ids_field']: tf.FixedLenSequenceFeature([], dtype=tf.int64), self.params['caption_tokens_field']: tf.FixedLenSequenceFeature([], dtype=tf.string)}
items_to_handlers = {'image': tfexample_decoder.Image(image_key=self.params['image_field'], format_key='image/format', channels=3), 'target_ids': tfexample_decoder.Tensor(self.params['caption_ids_field']), 'target_tokens': tfexample_decoder.Tensor(self.params['caption_tokens_field']), 'target_len': tfexample_decoder.ItemHandlerCallback(keys=[self.params['caption_tokens_field']], func=(lambda x: tf.size(x[self.params['caption_tokens_field']])))}
decoder = TFSEquenceExampleDecoder(context_keys_to_features, sequence_keys_to_features, items_to_handlers)
dataset = tf.contrib.slim.dataset.Dataset(data_sources=self.params['files'], reader=tf.TFRecordReader, decoder=decoder, num_samples=None, items_to_descriptions={})
return tf.contrib.slim.dataset_data_provider.DatasetDataProvider(dataset=dataset, shuffle=self.params['shuffle'], num_epochs=self.params['num_epochs'], **kwargs)
@property
def feature_keys(self):
return set(['image'])
@property
def label_keys(self):
return set(['target_tokens', 'target_ids', 'target_len'])
|
def make_parallel_data_provider(data_sources_source, data_sources_target, reader=tf.TextLineReader, num_samples=None, source_delimiter=' ', target_delimiter=' ', **kwargs):
'Creates a DataProvider that reads parallel text data.\n\n Args:\n data_sources_source: A list of data sources for the source text files.\n data_sources_target: A list of data sources for the target text files.\n Can be None for inference mode.\n num_samples: Optional, number of records in the dataset\n source_delimiter: Split tokens in the source data on this delimiter. Defaults to space.\n target_delimiter: Split tokens in the target data on this delimiter. Defaults to space.\n kwargs: Additional arguments (shuffle, num_epochs, etc) that are passed\n to the data provider\n\n Returns:\n A DataProvider instance\n '
decoder_source = split_tokens_decoder.SplitTokensDecoder(tokens_feature_name='source_tokens', length_feature_name='source_len', append_token='SEQUENCE_END', delimiter=source_delimiter)
dataset_source = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_source, reader=reader, decoder=decoder_source, num_samples=num_samples, items_to_descriptions={})
dataset_target = None
if (data_sources_target is not None):
decoder_target = split_tokens_decoder.SplitTokensDecoder(tokens_feature_name='target_tokens', length_feature_name='target_len', prepend_token='SEQUENCE_START', append_token='SEQUENCE_END', delimiter=target_delimiter)
dataset_target = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_target, reader=reader, decoder=decoder_target, num_samples=num_samples, items_to_descriptions={})
return ParallelDataProvider(dataset1=dataset_source, dataset2=dataset_target, **kwargs)
|
class ParallelDataProvider(data_provider.DataProvider):
'Creates a ParallelDataProvider. This data provider reads two datasets\n in parallel, keeping them aligned.\n\n Args:\n dataset1: The first dataset. An instance of the Dataset class.\n dataset2: The second dataset. An instance of the Dataset class.\n Can be None. If None, only `dataset1` is read.\n num_readers: The number of parallel readers to use.\n shuffle: Whether to shuffle the data sources and common queue when\n reading.\n num_epochs: The number of times each data source is read. If left as None,\n the data will be cycled through indefinitely.\n common_queue_capacity: The capacity of the common queue.\n common_queue_min: The minimum number of elements in the common queue after\n a dequeue.\n seed: The seed to use if shuffling.\n '
def __init__(self, dataset1, dataset2, shuffle=True, num_epochs=None, common_queue_capacity=4096, common_queue_min=1024, seed=None):
if (seed is None):
seed = np.random.randint(10 ** 9)
(_, data_source) = parallel_reader.parallel_read(dataset1.data_sources, reader_class=dataset1.reader, num_epochs=num_epochs, num_readers=1, shuffle=False, capacity=common_queue_capacity, min_after_dequeue=common_queue_min, seed=seed)
data_target = ''
if (dataset2 is not None):
(_, data_target) = parallel_reader.parallel_read(dataset2.data_sources, reader_class=dataset2.reader, num_epochs=num_epochs, num_readers=1, shuffle=False, capacity=common_queue_capacity, min_after_dequeue=common_queue_min, seed=seed)
if shuffle:
shuffle_queue = tf.RandomShuffleQueue(capacity=common_queue_capacity, min_after_dequeue=common_queue_min, dtypes=[tf.string, tf.string], seed=seed)
enqueue_ops = []
enqueue_ops.append(shuffle_queue.enqueue([data_source, data_target]))
tf.train.add_queue_runner(tf.train.QueueRunner(shuffle_queue, enqueue_ops))
(data_source, data_target) = shuffle_queue.dequeue()
items = dataset1.decoder.list_items()
tensors = dataset1.decoder.decode(data_source, items)
if (dataset2 is not None):
items2 = dataset2.decoder.list_items()
tensors2 = dataset2.decoder.decode(data_target, items2)
items = (items + items2)
tensors = (tensors + tensors2)
super(ParallelDataProvider, self).__init__(items_to_tensors=dict(zip(items, tensors)), num_samples=dataset1.num_samples)
|
def strip_bpe(text):
'Decodes text that was processed using BPE from\n https://github.com/rsennrich/subword-nmt'
return text.replace('@@ ', '').strip()
|
def decode_sentencepiece(text):
'Decodes text that uses https://github.com/google/sentencepiece encoding.\n Assumes that pieces are separated by a space'
return ''.join(text.split(' ')).replace('▁', ' ').strip()
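# Tiny checks that illustrate the two subword decoders above on hand-made input:
def _example_subword_decoding():
    assert strip_bpe('new@@ comer s') == 'newcomer s'
    assert decode_sentencepiece('▁the ▁quick') == 'the quick'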
|
def slice_text(text, eos_token='SEQUENCE_END', sos_token='SEQUENCE_START'):
'Slices text from SEQUENCE_START to SEQUENCE_END, not including\n these special tokens.\n '
eos_index = text.find(eos_token)
text = (text[:eos_index] if (eos_index > (- 1)) else text)
sos_index = text.find(sos_token)
text = (text[(sos_index + len(sos_token)):] if (sos_index > (- 1)) else text)
return text.strip()
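# For example, slice_text keeps only the text between the special tokens:
def _example_slice_text():
    text = 'SEQUENCE_START SELECT * FROM COURSE ; SEQUENCE_END padding'
    assert slice_text(text) == 'SELECT * FROM COURSE ;'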
|
class TFSEquenceExampleDecoder(data_decoder.DataDecoder):
"A decoder for TensorFlow Examples.\n Decoding Example proto buffers is comprised of two stages: (1) Example parsing\n and (2) tensor manipulation.\n In the first stage, the tf.parse_example function is called with a list of\n FixedLenFeatures and SparseLenFeatures. These instances tell TF how to parse\n the example. The output of this stage is a set of tensors.\n In the second stage, the resulting tensors are manipulated to provide the\n requested 'item' tensors.\n To perform this decoding operation, an ExampleDecoder is given a list of\n ItemHandlers. Each ItemHandler indicates the set of features for stage 1 and\n contains the instructions for post_processing its tensors for stage 2.\n "
def __init__(self, context_keys_to_features, sequence_keys_to_features, items_to_handlers):
"Constructs the decoder.\n Args:\n keys_to_features: a dictionary from TF-Example keys to either\n tf.VarLenFeature or tf.FixedLenFeature instances. See tensorflow's\n parsing_ops.py.\n items_to_handlers: a dictionary from items (strings) to ItemHandler\n instances. Note that the ItemHandler's are provided the keys that they\n use to return the final item Tensors.\n "
self._context_keys_to_features = context_keys_to_features
self._sequence_keys_to_features = sequence_keys_to_features
self._items_to_handlers = items_to_handlers
def list_items(self):
'See base class.'
return list(self._items_to_handlers.keys())
def decode(self, serialized_example, items=None):
'Decodes the given serialized TF-example.\n Args:\n serialized_example: a serialized TF-example tensor.\n items: the list of items to decode. These must be a subset of the item\n keys in self._items_to_handlers. If `items` is left as None, then all\n of the items in self._items_to_handlers are decoded.\n Returns:\n the decoded items, a list of tensors.\n '
(context, sequence) = tf.parse_single_sequence_example(serialized_example, self._context_keys_to_features, self._sequence_keys_to_features)
example = {}
example.update(context)
example.update(sequence)
all_features = {}
all_features.update(self._context_keys_to_features)
all_features.update(self._sequence_keys_to_features)
for (k, value) in all_features.items():
if isinstance(value, tf.FixedLenFeature):
example[k] = tf.reshape(example[k], value.shape)
if (not items):
items = self._items_to_handlers.keys()
outputs = []
for item in items:
handler = self._items_to_handlers[item]
keys_to_tensors = {key: example[key] for key in handler.keys}
outputs.append(handler.tensors_to_item(keys_to_tensors))
return outputs
|
class SplitMaskDecoder(data_decoder.DataDecoder):
'A DataDecoder that splits a string of space-separated numbers into a\n float mask tensor.\n\n Args:\n delimiter: Delimiter to split on. Must be a single character.\n decoder_mask_feature_name: A descriptive feature name for the mask values\n '
def __init__(self, delimiter=' ', decoder_mask_feature_name='decoder_mask'):
self.delimiter = delimiter
self.decoder_mask_feature_name = decoder_mask_feature_name
def decode(self, data, items):
decoded_items = {}
tokens = tf.string_split([data], delimiter=self.delimiter).values
decoder_mask = tf.string_to_number(tokens)
decoded_items[self.decoder_mask_feature_name] = decoder_mask
return [decoded_items[_] for _ in items]
def list_items(self):
return [self.decoder_mask_feature_name]
|
class SplitTokensDecoder(data_decoder.DataDecoder):
'A DataDecoder that splits a string tensor into individual tokens and\n returns the tokens and the length.\n Optionally prepends or appends special tokens.\n\n Args:\n delimiter: Delimiter to split on. Must be a single character.\n tokens_feature_name: A descriptive feature name for the token values\n length_feature_name: A descriptive feature name for the length value\n prepend_token: Optional token to prepend to the sequence.\n append_token: Optional token to append to the sequence.\n '
def __init__(self, delimiter=' ', tokens_feature_name='tokens', length_feature_name='length', prepend_token=None, append_token=None):
self.delimiter = delimiter
self.tokens_feature_name = tokens_feature_name
self.length_feature_name = length_feature_name
self.prepend_token = prepend_token
self.append_token = append_token
def decode(self, data, items):
decoded_items = {}
tokens = tf.string_split([data], delimiter=self.delimiter).values
if (self.prepend_token is not None):
tokens = tf.concat([[self.prepend_token], tokens], 0)
if (self.append_token is not None):
tokens = tf.concat([tokens, [self.append_token]], 0)
decoded_items[self.length_feature_name] = tf.size(tokens)
decoded_items[self.tokens_feature_name] = tokens
return [decoded_items[_] for _ in items]
def list_items(self):
return [self.tokens_feature_name, self.length_feature_name]
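# A small graph-construction sketch for SplitTokensDecoder; evaluating the
# returned tensors would still require a TensorFlow session and is omitted here.
def _example_split_tokens_decoder():
    decoder = SplitTokensDecoder(
        tokens_feature_name='source_tokens',
        length_feature_name='source_len',
        append_token='SEQUENCE_END')
    tokens, length = decoder.decode(
        tf.constant('show all courses'),
        ['source_tokens', 'source_len'])
    return tokens, length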
|
def make_triple_data_provider(data_sources_source, data_sources_target, data_sources_schema, reader=tf.TextLineReader, num_samples=None, source_delimiter=' ', target_delimiter=' ', **kwargs):
'Creates a DataProvider that reads source, target, and schema text data in parallel.\n\n Args:\n data_sources_source: A list of data sources for the source text files.\n data_sources_target: A list of data sources for the target text files.\n Can be None for inference mode.\n data_sources_schema: A list of data sources for the schema location text files.\n num_samples: Optional, number of records in the dataset\n source_delimiter: Split tokens in the source data on this delimiter. Defaults to space.\n target_delimiter: Split tokens in the target data on this delimiter. Defaults to space.\n kwargs: Additional arguments (shuffle, num_epochs, etc) that are passed\n to the data provider\n\n Returns:\n A TripleDataProvider instance\n '
decoder_source = split_tokens_decoder.SplitTokensDecoder(tokens_feature_name='source_tokens', length_feature_name='source_len', append_token='SEQUENCE_END', delimiter=source_delimiter)
print(decoder_source)
print('schema data source', data_sources_schema)
dataset_source = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_source, reader=reader, decoder=decoder_source, num_samples=num_samples, items_to_descriptions={})
dataset_target = None
if (data_sources_target is not None):
decoder_target = split_tokens_decoder.SplitTokensDecoder(tokens_feature_name='target_tokens', length_feature_name='target_len', prepend_token='SEQUENCE_START', append_token='SEQUENCE_END', delimiter=target_delimiter)
dataset_target = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_target, reader=reader, decoder=decoder_target, num_samples=num_samples, items_to_descriptions={})
decoder_schemas = split_tokens_decoder.SplitMaskDecoder(decoder_mask_feature_name='decoder_mask', delimiter=' ')
dataset_schemas = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_schema, reader=reader, decoder=decoder_schemas, num_samples=num_samples, items_to_descriptions={})
return TripleDataProvider(dataset1=dataset_source, dataset2=dataset_target, schemas=dataset_schemas, **kwargs)
|