class TripleDataProvider(data_provider.DataProvider): 'Creates a TripleDataProvider. This data provider reads two datasets\n and their list of schemas in parallel, keeping them aligned.\n\n Args:\n dataset1: The first dataset. An instance of the Dataset class.\n dataset2: The second dataset. An instance of the Dataset class.\n Can be None. If None, only `dataset1` is read.\n schemas: The schema dataset. An instance of the Dataset class.\n Can be None.\n shuffle: Whether to shuffle the data sources and common queue when\n reading.\n num_epochs: The number of times each data source is read. If left as None,\n the data will be cycled through indefinitely.\n common_queue_capacity: The capacity of the common queue.\n common_queue_min: The minimum number of elements in the common queue after\n a dequeue.\n seed: The seed to use if shuffling.\n ' def __init__(self, dataset1, dataset2, schemas=None, shuffle=True, num_epochs=None, common_queue_capacity=4096, common_queue_min=1024, seed=None): if (seed is None): seed = np.random.randint(1000000000) (_, data_source) = parallel_reader.parallel_read(dataset1.data_sources, reader_class=dataset1.reader, num_epochs=num_epochs, num_readers=1, shuffle=False, capacity=common_queue_capacity, min_after_dequeue=common_queue_min, seed=seed) data_target = '' if (dataset2 is not None): (_, data_target) = parallel_reader.parallel_read(dataset2.data_sources, reader_class=dataset2.reader, num_epochs=num_epochs, num_readers=1, shuffle=False, capacity=common_queue_capacity, min_after_dequeue=common_queue_min, seed=seed) data_schemas = '' if (schemas is not None): (_, data_schemas) = parallel_reader.parallel_read(schemas.data_sources, reader_class=schemas.reader, num_epochs=num_epochs, num_readers=1, shuffle=False, capacity=common_queue_capacity, min_after_dequeue=common_queue_min, seed=seed) if shuffle: shuffle_queue = tf.RandomShuffleQueue(capacity=common_queue_capacity, min_after_dequeue=common_queue_min, dtypes=[tf.string, tf.string, tf.string], seed=seed) enqueue_ops = [] enqueue_ops.append(shuffle_queue.enqueue([data_source, data_target, data_schemas])) tf.train.add_queue_runner(tf.train.QueueRunner(shuffle_queue, enqueue_ops)) (data_source, data_target, data_schemas) = shuffle_queue.dequeue() items = dataset1.decoder.list_items() tensors = dataset1.decoder.decode(data_source, items) if (dataset2 is not None): items2 = dataset2.decoder.list_items() tensors2 = dataset2.decoder.decode(data_target, items2) items = (items + items2) tensors = (tensors + tensors2) if (schemas is not None): items_schema = schemas.decoder.list_items() tensors_schema = schemas.decoder.decode(data_schemas, items_schema) items = (items + items_schema) tensors = (tensors + tensors_schema) super(TripleDataProvider, self).__init__(items_to_tensors=dict(zip(items, tensors)), num_samples=dataset1.num_samples)
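Note on the construction above: each of the three streams is read with a single reader and shuffle=False so the records stay aligned, and shuffling is applied once to the joined triple via a RandomShuffleQueue. A minimal sketch of that pattern, with placeholder string tensors standing in for the parallel_reader outputs:

import tensorflow as tf

# Placeholder records; in the provider these come from parallel_reader.parallel_read.
data_source = tf.constant('source-record')
data_target = tf.constant('target-record')
data_schemas = tf.constant('schema-record')

# Shuffle the triple, not the individual streams, so alignment survives.
shuffle_queue = tf.RandomShuffleQueue(capacity=4096, min_after_dequeue=1024, dtypes=[tf.string, tf.string, tf.string], seed=42)
enqueue_op = shuffle_queue.enqueue([data_source, data_target, data_schemas])
tf.train.add_queue_runner(tf.train.QueueRunner(shuffle_queue, [enqueue_op]))
data_source, data_target, data_schemas = shuffle_queue.dequeue()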
class VocabInfo(collections.namedtuple('VocabInfo', ['path', 'vocab_size', 'special_vocab'])): 'Convenience structure for vocabulary information.\n ' @property def total_size(self): 'Returns the size of the base vocabulary plus the size of the special vocabulary.' return (self.vocab_size + len(self.special_vocab))
def get_vocab_info(vocab_path): 'Creates a `VocabInfo` instance that contains the vocabulary size and\n the special vocabulary for the given file.\n\n Args:\n vocab_path: Path to a vocabulary file with one word per line.\n\n Returns:\n A VocabInfo tuple.\n ' with gfile.GFile(vocab_path) as file: vocab_size = sum((1 for _ in file)) special_vocab = get_special_vocab(vocab_size) return VocabInfo(vocab_path, vocab_size, special_vocab)
def get_special_vocab(vocabulary_size): 'Returns the `SpecialVocab` instance for a given vocabulary size.\n ' return SpecialVocab(*range(vocabulary_size, (vocabulary_size + 5)))
def read_vocab(filename): 'Reads a vocab file into memory and adds the special vocab to it.\n\n Args:\n filename: Path to a vocabulary file containing one word per line.\n Each word is mapped to its line number.\n\n Returns:\n A tuple (vocab, counts, special_vocab)\n ' tf.logging.info(('Reading vocabulary from %s' % filename)) with gfile.GFile(filename) as file: vocab = list((line.strip('\n') for line in file)) vocab_size = len(vocab) has_counts = (len(vocab[0].split('\t')) == 2) if has_counts: (vocab, counts) = zip(*[_.split('\t') for _ in vocab]) counts = [float(_) for _ in counts] vocab = list(vocab) else: counts = [(- 1.0) for _ in vocab] special_vocab = get_special_vocab(vocab_size) vocab += list(special_vocab._fields) counts += [(- 1.0) for _ in list(special_vocab._fields)] return (vocab, counts, special_vocab)
def create_vocabulary_lookup_table(filename, default_value=None): 'Creates a lookup table for a vocabulary file.\n Args:\n filename: Path to a vocabulary file containing one word per line.\n Each word is mapped to its line number.\n default_value: UNK tokens will be mapped to this id.\n If None, UNK tokens will be mapped to [vocab_size]\n Returns:\n A tuple (vocab_to_id_table, id_to_vocab_table,\n word_to_count_table, vocab_size). The vocab size does not include\n the UNK token.\n ' if (not gfile.Exists(filename)): raise ValueError('File does not exist: {}'.format(filename)) with gfile.GFile(filename) as file: vocab = list((line.strip('\n') for line in file)) vocab_size = len(vocab) has_counts = (len(vocab[0].split('\t')) == 2) if has_counts: (vocab, counts) = zip(*[_.split('\t') for _ in vocab]) counts = [float(_) for _ in counts] vocab = list(vocab) else: counts = [(- 1.0) for _ in vocab] special_vocab = get_special_vocab(vocab_size) vocab += list(special_vocab._fields) vocab_size += len(special_vocab) counts += [(- 1.0) for _ in list(special_vocab._fields)] if (default_value is None): default_value = special_vocab.UNK tf.logging.info('Creating vocabulary lookup table of size %d', vocab_size) vocab_tensor = tf.constant(vocab) count_tensor = tf.constant(counts, dtype=tf.float32) vocab_idx_tensor = tf.range(vocab_size, dtype=tf.int64) id_to_vocab_init = tf.contrib.lookup.KeyValueTensorInitializer(vocab_idx_tensor, vocab_tensor, tf.int64, tf.string) id_to_vocab_table = tf.contrib.lookup.HashTable(id_to_vocab_init, 'UNK') vocab_to_id_init = tf.contrib.lookup.KeyValueTensorInitializer(vocab_tensor, vocab_idx_tensor, tf.string, tf.int64) vocab_to_id_table = tf.contrib.lookup.HashTable(vocab_to_id_init, default_value) word_to_count_init = tf.contrib.lookup.KeyValueTensorInitializer(vocab_tensor, count_tensor, tf.string, tf.float32) word_to_count_table = tf.contrib.lookup.HashTable(word_to_count_init, (- 1)) return (vocab_to_id_table, id_to_vocab_table, word_to_count_table, vocab_size)
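Usage sketch for the lookup tables (the vocabulary path is hypothetical; note that HashTables must be initialized with tf.tables_initializer() before any lookup):

(vocab_to_id, id_to_vocab, word_to_count, vocab_size) = create_vocabulary_lookup_table('/tmp/vocab.txt')
ids = vocab_to_id.lookup(tf.constant(['the', 'cat', 'notaword']))
with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run(ids))  # out-of-vocabulary words map to special_vocab.UNK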
@function.Defun(tf.float32, tf.float32, tf.float32, func_name='att_sum_bahdanau', noinline=True) def att_sum_bahdanau(v_att, keys, query): 'Calculates a batch- and time-wise dot product with a variable.' return tf.reduce_sum((v_att * tf.tanh((keys + tf.expand_dims(query, 1)))), [2])
@function.Defun(tf.float32, tf.float32, func_name='att_sum_dot', noinline=True) def att_sum_dot(keys, query): 'Calculates a batch- and time-wise dot product.' return tf.reduce_sum((keys * tf.expand_dims(query, 1)), [2])
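Both scoring functions reduce `[B, T, num_units]` keys and a `[B, num_units]` query to per-timestep scores of shape `[B, T]`. The same math written with plain ops, purely for illustration:

keys = tf.random_normal([8, 20, 128])   # [B, T, num_units]
query = tf.random_normal([8, 128])      # [B, num_units]
# Dot scoring: broadcast the query over T, multiply, sum out the unit axis.
dot_scores = tf.reduce_sum(keys * tf.expand_dims(query, 1), [2])  # [B, T]
# Bahdanau scoring: additive combination squashed by tanh, weighted by v_att.
v_att = tf.random_normal([128])
bahdanau_scores = tf.reduce_sum(v_att * tf.tanh(keys + tf.expand_dims(query, 1)), [2])  # [B, T]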
@six.add_metaclass(abc.ABCMeta) class AttentionLayer(GraphModule, Configurable): '\n Attention layer according to https://arxiv.org/abs/1409.0473.\n\n Params:\n num_units: Number of units used in the attention layer\n ' def __init__(self, params, mode, name='attention'): GraphModule.__init__(self, name) Configurable.__init__(self, params, mode) @staticmethod def default_params(): return {'num_units': 128} @abc.abstractmethod def score_fn(self, keys, query): 'Computes the attention score' raise NotImplementedError def _build(self, query, keys, values, values_length): 'Computes attention scores and outputs.\n\n Args:\n query: The query used to calculate attention scores.\n In seq2seq this is typically the current state of the decoder.\n A tensor of shape `[B, ...]`\n keys: The keys used to calculate attention scores. In seq2seq, these\n are typically the outputs of the encoder and equivalent to `values`.\n A tensor of shape `[B, T, ...]` where each element in the `T`\n dimension corresponds to the key for that value.\n values: The elements to compute attention over. In seq2seq, this is\n typically the sequence of encoder outputs.\n A tensor of shape `[B, T, input_dim]`.\n values_length: An int32 tensor of shape `[B]` defining the sequence\n length of the attention values.\n\n Returns:\n A tuple `(scores, context)`.\n `scores` is a vector of length `T` where each element is the\n normalized "score" of the corresponding `inputs` element.\n `context` is the final attention layer output corresponding to\n the weighted inputs.\n A tensor of shape `[B, input_dim]`.\n ' values_depth = values.get_shape().as_list()[(- 1)] att_keys = tf.contrib.layers.fully_connected(inputs=keys, num_outputs=self.params['num_units'], activation_fn=None, scope='att_keys') att_query = tf.contrib.layers.fully_connected(inputs=query, num_outputs=self.params['num_units'], activation_fn=None, scope='att_query') scores = self.score_fn(att_keys, att_query) num_scores = tf.shape(scores)[1] scores_mask = tf.sequence_mask(lengths=tf.to_int32(values_length), maxlen=tf.to_int32(num_scores), dtype=tf.float32) scores = ((scores * scores_mask) + ((1.0 - scores_mask) * tf.float32.min)) scores_normalized = tf.nn.softmax(scores, name='scores_normalized') context = (tf.expand_dims(scores_normalized, 2) * values) context = tf.reduce_sum(context, 1, name='context') context.set_shape([None, values_depth]) return (scores_normalized, context)
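The masking step in `_build` adds `tf.float32.min` to padded positions so the subsequent softmax assigns them essentially zero weight. A small self-contained demonstration of the trick:

scores = tf.constant([[1.0, 2.0, 3.0, 4.0]])
mask = tf.sequence_mask(lengths=[2], maxlen=4, dtype=tf.float32)  # [[1, 1, 0, 0]]
masked = (scores * mask) + ((1.0 - mask) * tf.float32.min)
with tf.Session() as sess:
    print(sess.run(tf.nn.softmax(masked)))  # padded positions get ~0.0 probability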
class AttentionLayerDot(AttentionLayer): 'An attention layer that calculates attention scores using\n a dot product.\n ' def score_fn(self, keys, query): return att_sum_dot(keys, query)
class AttentionLayerBahdanau(AttentionLayer): 'An attention layer that calculates attention scores using\n a parameterized multiplication.' def score_fn(self, keys, query): v_att = tf.get_variable('v_att', shape=[self.params['num_units']], dtype=tf.float32) return att_sum_bahdanau(v_att, keys, query)
class AttentionDecoderOutput(namedtuple('DecoderOutput', ['logits', 'predicted_ids', 'cell_output', 'attention_scores', 'attention_context'])): 'Augmented decoder output that also includes the attention scores.\n ' pass
class AttentionDecoder(RNNDecoder): 'An RNN Decoder that uses attention over an input sequence.\n\n Args:\n cell: An instance of `tf.contrib.rnn.RNNCell`\n helper: An instance of `tf.contrib.seq2seq.Helper` to assist decoding\n initial_state: A tensor or tuple of tensors used as the initial cell\n state.\n vocab_size: Output vocabulary size, i.e. number of units\n in the softmax layer\n attention_keys: The sequence used to calculate attention scores.\n A tensor of shape `[B, T, ...]`.\n attention_values: The sequence to attend over.\n A tensor of shape `[B, T, input_dim]`.\n attention_values_length: Sequence length of the attention values.\n An int32 Tensor of shape `[B]`.\n attention_fn: The attention function to use. This function maps from\n `(state, inputs)` to `(attention_scores, attention_context)`.\n For an example, see `seq2seq.decoder.attention.AttentionLayer`.\n reverse_scores_lengths: Optional, an array of sequence lengths. If set,\n reverse the attention scores in the output. This is used when\n a reversed source sequence is fed as an input but you want to\n return the scores in non-reversed order.\n decoder_mask: Optional float mask over the output vocabulary, used at\n inference time to suppress disallowed tokens.\n ' def __init__(self, params, mode, vocab_size, attention_keys, attention_values, attention_values_length, attention_fn, reverse_scores_lengths=None, decoder_mask=None, name='attention_decoder'): super(AttentionDecoder, self).__init__(params, mode, name) self.vocab_size = vocab_size self.attention_keys = attention_keys self.attention_values = attention_values self.attention_values_length = attention_values_length self.attention_fn = attention_fn self.reverse_scores_lengths = reverse_scores_lengths self.decoder_mask = decoder_mask @property def output_size(self): return AttentionDecoderOutput(logits=self.vocab_size, predicted_ids=tf.TensorShape([]), cell_output=self.cell.output_size, attention_scores=tf.shape(self.attention_values)[1:(- 1)], attention_context=self.attention_values.get_shape()[(- 1)]) @property def output_dtype(self): return AttentionDecoderOutput(logits=tf.float32, predicted_ids=tf.int32, cell_output=tf.float32, attention_scores=tf.float32, attention_context=tf.float32) def initialize(self, name=None): (finished, first_inputs) = self.helper.initialize() attention_context = tf.zeros([tf.shape(first_inputs)[0], self.attention_values.get_shape().as_list()[(- 1)]]) first_inputs = tf.concat([first_inputs, attention_context], 1) return (finished, first_inputs, self.initial_state) def compute_output(self, cell_output): 'Computes the decoder outputs.' 
(att_scores, attention_context) = self.attention_fn(query=cell_output, keys=self.attention_keys, values=self.attention_values, values_length=self.attention_values_length) softmax_input = tf.contrib.layers.fully_connected(inputs=tf.concat([cell_output, attention_context], 1), num_outputs=self.cell.output_size, activation_fn=tf.nn.tanh, scope='attention_mix') logits = tf.contrib.layers.fully_connected(inputs=softmax_input, num_outputs=self.vocab_size, activation_fn=None, scope='logits') return (softmax_input, logits, att_scores, attention_context) def _setup(self, initial_state, helper): self.initial_state = initial_state def att_next_inputs(time, outputs, state, sample_ids, name=None): 'Wraps the original decoder helper function to append the attention\n context.\n ' (finished, next_inputs, next_state) = helper.next_inputs(time=time, outputs=outputs, state=state, sample_ids=sample_ids, name=name) next_inputs = tf.concat([next_inputs, outputs.attention_context], 1) return (finished, next_inputs, next_state) self.helper = CustomHelper(initialize_fn=helper.initialize, sample_fn=helper.sample, next_inputs_fn=att_next_inputs) def step(self, time_, inputs, state, name=None): (cell_output, cell_state) = self.cell(inputs, state) (cell_output_new, logits, attention_scores, attention_context) = self.compute_output(cell_output) if (self.reverse_scores_lengths is not None): attention_scores = tf.reverse_sequence(input=attention_scores, seq_lengths=self.reverse_scores_lengths, seq_dim=1, batch_dim=0) if ((self.mode == tf.contrib.learn.ModeKeys.INFER) and (self.decoder_mask is not None)): pred = ((logits * self.decoder_mask) + ((tf.reduce_min(logits) - 1) * (1 - self.decoder_mask))) sample_ids = self.helper.sample(time=time_, outputs=pred, state=cell_state) else: sample_ids = self.helper.sample(time=time_, outputs=logits, state=cell_state) outputs = AttentionDecoderOutput(logits=logits, predicted_ids=sample_ids, cell_output=cell_output_new, attention_scores=attention_scores, attention_context=attention_context) (finished, next_inputs, next_state) = self.helper.next_inputs(time=time_, outputs=outputs, state=cell_state, sample_ids=sample_ids) return (outputs, next_state, next_inputs, finished)
class BasicDecoder(RNNDecoder): 'Simple RNN decoder that performs a softmax operation on the cell output.\n ' def __init__(self, params, mode, vocab_size, decoder_mask=None, name='basic_decoder'): super(BasicDecoder, self).__init__(params, mode, name) self.vocab_size = vocab_size self.decoder_mask = decoder_mask def compute_output(self, cell_output): 'Computes the decoder outputs.' return tf.contrib.layers.fully_connected(inputs=cell_output, num_outputs=self.vocab_size, activation_fn=None) @property def output_size(self): return DecoderOutput(logits=self.vocab_size, predicted_ids=tf.TensorShape([]), cell_output=self.cell.output_size) @property def output_dtype(self): return DecoderOutput(logits=tf.float32, predicted_ids=tf.int32, cell_output=tf.float32) def initialize(self, name=None): (finished, first_inputs) = self.helper.initialize() return (finished, first_inputs, self.initial_state) def step(self, time_, inputs, state, name=None): (cell_output, cell_state) = self.cell(inputs, state) logits = self.compute_output(cell_output) if ((self.mode == tf.contrib.learn.ModeKeys.INFER) and (self.decoder_mask is not None)): pred = ((logits * self.decoder_mask) + ((tf.reduce_min(logits) - 1) * (1 - self.decoder_mask))) sample_ids = self.helper.sample(time=time_, outputs=pred, state=cell_state) else: sample_ids = self.helper.sample(time=time_, outputs=logits, state=cell_state) outputs = DecoderOutput(logits=logits, predicted_ids=sample_ids, cell_output=cell_output) (finished, next_inputs, next_state) = self.helper.next_inputs(time=time_, outputs=outputs, state=cell_state, sample_ids=sample_ids) return (outputs, next_state, next_inputs, finished)
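The `decoder_mask` trick used at inference time pushes the logits of disallowed tokens below the current minimum so they can never win the argmax. In numpy terms (illustrative values):

import numpy as np
logits = np.array([2.0, 0.5, 1.0, 3.0], dtype=np.float32)
decoder_mask = np.array([1.0, 1.0, 0.0, 0.0], dtype=np.float32)  # 1.0 = token allowed
pred = (logits * decoder_mask) + ((np.min(logits) - 1.0) * (1.0 - decoder_mask))
print(np.argmax(pred))  # 0 -- a masked-out token can never be selected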
class FinalBeamDecoderOutput(namedtuple('FinalBeamDecoderOutput', ['predicted_ids', 'beam_search_output'])): 'Final outputs returned by the beam search after all decoding is finished.\n\n Args:\n predicted_ids: The final prediction. A tensor of shape\n `[T, 1, beam_width]`.\n beam_search_output: An instance of `BeamDecoderOutput` that describes\n the state of the beam search.\n ' pass
class BeamDecoderOutput(namedtuple('BeamDecoderOutput', ['logits', 'predicted_ids', 'log_probs', 'scores', 'beam_parent_ids', 'original_outputs'])): 'Structure for the output of a beam search decoder. This class is used\n to define the output at each step as well as the final output of the decoder.\n If used as the final output, a time dimension `T` is inserted after the\n beam_size dimension.\n\n Args:\n logits: Logits at the current time step of shape `[beam_size, vocab_size]`\n predicted_ids: Chosen softmax predictions at the current time step.\n An int32 tensor of shape `[beam_size]`.\n log_probs: Total log probabilities of all beams at the current time step.\n A float32 tensor of shape `[beam_size]`.\n scores: Total scores of all beams at the current time step. This differs\n from log probabilities in that the score may add additional processing\n such as length normalization. A float32 tensor of shape `[beam_size]`.\n beam_parent_ids: The indices of the beams that are being continued.\n An int32 tensor of shape `[beam_size]`.\n original_outputs: The output of the wrapped decoder, gathered according\n to `beam_parent_ids`.\n ' pass
class BeamSearchDecoder(RNNDecoder): 'The BeamSearchDecoder wraps another decoder to perform beam search instead\n of greedy selection. This decoder must be used with a batch size of 1, which\n will result in an effective batch size of `beam_width`.\n\n Args:\n decoder: An instance of `RNNDecoder` to be used with beam search.\n config: A `BeamSearchConfig` that defines beam search decoding parameters.\n ' def __init__(self, decoder, config): super(BeamSearchDecoder, self).__init__(decoder.params, decoder.mode, decoder.name) self.decoder = decoder self.config = config def __call__(self, *args, **kwargs): with self.decoder.variable_scope(): return self._build(*args, **kwargs) @property def output_size(self): return BeamDecoderOutput(logits=self.decoder.vocab_size, predicted_ids=tf.TensorShape([]), log_probs=tf.TensorShape([]), scores=tf.TensorShape([]), beam_parent_ids=tf.TensorShape([]), original_outputs=self.decoder.output_size) @property def output_dtype(self): return BeamDecoderOutput(logits=tf.float32, predicted_ids=tf.int32, log_probs=tf.float32, scores=tf.float32, beam_parent_ids=tf.int32, original_outputs=self.decoder.output_dtype) @property def batch_size(self): return self.config.beam_width def initialize(self, name=None): (finished, first_inputs, initial_state) = self.decoder.initialize() beam_state = beam_search.create_initial_beam_state(config=self.config) return (finished, first_inputs, (initial_state, beam_state)) def finalize(self, outputs, final_state): predicted_ids = beam_search.gather_tree(outputs.predicted_ids, outputs.beam_parent_ids) outputs = nest.map_structure((lambda x: tf.expand_dims(x, 1)), outputs) final_outputs = FinalBeamDecoderOutput(predicted_ids=tf.expand_dims(predicted_ids, 1), beam_search_output=outputs) return (final_outputs, final_state) def _build(self, initial_state, helper): initial_state = nest.map_structure((lambda x: tf.tile(x, [self.batch_size, 1])), initial_state) self.decoder._setup(initial_state, helper) return super(BeamSearchDecoder, self)._build(self.decoder.initial_state, self.decoder.helper) def step(self, time_, inputs, state, name=None): (decoder_state, beam_state) = state (decoder_output, decoder_state, _, _) = self.decoder.step(time_, inputs, decoder_state) (bs_output, beam_state) = beam_search.beam_search_step(time_=time_, logits=decoder_output.logits, beam_state=beam_state, config=self.config) decoder_state = nest.map_structure((lambda x: tf.gather(x, bs_output.beam_parent_ids)), decoder_state) decoder_output = nest.map_structure((lambda x: tf.gather(x, bs_output.beam_parent_ids)), decoder_output) next_state = (decoder_state, beam_state) outputs = BeamDecoderOutput(logits=tf.zeros([self.config.beam_width, self.config.vocab_size]), predicted_ids=bs_output.predicted_ids, log_probs=beam_state.log_probs, scores=bs_output.scores, beam_parent_ids=bs_output.beam_parent_ids, original_outputs=decoder_output) (finished, next_inputs, next_state) = self.decoder.helper.next_inputs(time=time_, outputs=decoder_output, state=next_state, sample_ids=bs_output.predicted_ids) next_inputs.set_shape([self.batch_size, None]) return (outputs, next_state, next_inputs, finished)
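The batch-size-1 requirement comes from `_build`: the single example's initial state is tiled `beam_width` times so that each beam hypothesis occupies one "batch" slot. A sketch of that expansion, assuming an LSTM-style tuple state:

from tensorflow.python.util import nest
beam_width = 5
initial_state = (tf.zeros([1, 256]), tf.zeros([1, 256]))  # batch size must be 1
tiled_state = nest.map_structure(lambda x: tf.tile(x, [beam_width, 1]), initial_state)
# Each element of tiled_state now has shape [beam_width, 256].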
class DecoderOutput(namedtuple('DecoderOutput', ['logits', 'predicted_ids', 'cell_output'])): 'Output of an RNN decoder.\n\n Note that we output both the logits and predictions because during\n dynamic decoding the predictions may not correspond to max(logits).\n For example, we may be sampling from the logits instead.\n ' pass
@six.add_metaclass(abc.ABCMeta) class RNNDecoder(Decoder, GraphModule, Configurable): 'Base class for RNN decoders.\n\n Args:\n cell: An instance of `tf.contrib.rnn.RNNCell`\n helper: An instance of `tf.contrib.seq2seq.Helper` to assist decoding\n initial_state: A tensor or tuple of tensors used as the initial cell\n state.\n name: A name for this module\n ' def __init__(self, params, mode, name): GraphModule.__init__(self, name) Configurable.__init__(self, params, mode) self.params['rnn_cell'] = _toggle_dropout(self.params['rnn_cell'], mode) self.cell = training_utils.get_rnn_cell(**self.params['rnn_cell']) self.initial_state = None self.helper = None @abc.abstractmethod def initialize(self, name=None): raise NotImplementedError @abc.abstractmethod def step(self, time_, inputs, state, name=None): raise NotImplementedError @property def batch_size(self): return tf.shape(nest.flatten([self.initial_state])[0])[0] def _setup(self, initial_state, helper): 'Sets the initial state and helper for the decoder.\n ' self.initial_state = initial_state self.helper = helper def finalize(self, outputs, final_state): 'Applies a final transformation to the decoder output once decoding is\n finished.\n ' return (outputs, final_state) @staticmethod def default_params(): return {'max_decode_length': 100, 'rnn_cell': _default_rnn_cell_params(), 'init_scale': 0.04} def _build(self, initial_state, helper): if (self.initial_state is None): self._setup(initial_state, helper) scope = tf.get_variable_scope() scope.set_initializer(tf.random_uniform_initializer((- self.params['init_scale']), self.params['init_scale'])) maximum_iterations = None if (self.mode == tf.contrib.learn.ModeKeys.INFER): maximum_iterations = self.params['max_decode_length'] (outputs, final_state) = dynamic_decode(decoder=self, output_time_major=True, impute_finished=False, maximum_iterations=maximum_iterations) return self.finalize(outputs, final_state)
class SchemaAttentionDecoderOutput(namedtuple('DecoderOutput', ['logits', 'predicted_ids', 'cell_output', 'attention_scores', 'attention_context', 'schema_attention_scores', 'schema_attention_context'])): 'Augmented decoder output that also includes the attention scores.\n ' pass
class SchemaCopyingAttentionDecoderOutput(namedtuple('DecoderOutput', ['logits', 'predicted_ids', 'cell_output', 'attention_scores', 'attention_context', 'schema_attention_scores', 'schema_attention_context', 'schema_attention_copy_vals'])): 'Augmented decoder output that also includes the attention scores\n and copy vals.\n ' pass
class SchemaMapAttentionDecoderOutput(namedtuple('DecoderOutput', ['logits', 'predicted_ids', 'cell_output', 'attention_scores', 'attention_context', 'schema_attention_scores', 'schema_attention_context', 'schema_map_attention_scores', 'schema_map_attention_context'])): 'Augmented decoder output that also includes the attention scores.\n ' pass
class SchemaAttentionDecoder(RNNDecoder): 'An RNN Decoder that uses attention over an input sequence and a schema.\n\n Args:\n cell: An instance of `tf.contrib.rnn.RNNCell`\n helper: An instance of `tf.contrib.seq2seq.Helper` to assist decoding\n initial_state: A tensor or tuple of tensors used as the initial cell\n state.\n vocab_size: Output vocabulary size, i.e. number of units\n in the softmax layer\n attention_keys: The sequence used to calculate attention scores.\n A tensor of shape `[B, T, ...]`.\n attention_values: The sequence to attend over.\n A tensor of shape `[B, T, input_dim]`.\n attention_values_length: Sequence length of the attention values.\n An int32 Tensor of shape `[B]`.\n attention_fn: The attention function to use. This function maps from\n `(state, inputs)` to `(attention_scores, attention_context)`.\n For an example, see `seq2seq.decoder.attention.AttentionLayer`.\n reverse_scores_lengths: Optional, an array of sequence lengths. If set,\n reverse the attention scores in the output. This is used when\n a reversed source sequence is fed as an input but you want to\n return the scores in non-reversed order.\n schema_attention_keys: The sequence used to calculate attention scores\n over the schema. A tensor of shape `[B, T, ...]`.\n schema_attention_values: The schema sequence to attend over.\n A tensor of shape `[B, T, input_dim]`.\n schema_attention_values_length: Sequence length of the schema attention\n values. An int32 Tensor of shape `[B]`.\n schema_attention_fn: The attention function to use over the schema.\n Defaults to `attention_fn` if not provided.\n ' def __init__(self, params, mode, vocab_size, attention_keys, attention_values, attention_values_length, attention_fn, reverse_scores_lengths=None, schema_attention_keys=None, schema_attention_values=None, schema_attention_values_length=None, schema_attention_fn=None, name='schema_attention_decoder'): super(SchemaAttentionDecoder, self).__init__(params, mode, name) self.vocab_size = vocab_size self.attention_keys = attention_keys self.attention_values = attention_values self.attention_values_length = attention_values_length self.attention_fn = attention_fn self.reverse_scores_lengths = reverse_scores_lengths self.schema_attention_keys = schema_attention_keys self.schema_attention_values = schema_attention_values self.schema_attention_values_length = schema_attention_values_length if schema_attention_fn: self.schema_attention_fn = schema_attention_fn else: self.schema_attention_fn = attention_fn @property def output_size(self): return SchemaAttentionDecoderOutput(logits=self.vocab_size, predicted_ids=tf.TensorShape([]), cell_output=self.cell.output_size, attention_scores=tf.shape(self.attention_values)[1:(- 1)], attention_context=self.attention_values.get_shape()[(- 1)], schema_attention_scores=tf.shape(self.schema_attention_values)[1:(- 1)], schema_attention_context=self.schema_attention_values.get_shape()[(- 1)]) @property def output_dtype(self): return SchemaAttentionDecoderOutput(logits=tf.float32, predicted_ids=tf.int32, cell_output=tf.float32, attention_scores=tf.float32, attention_context=tf.float32, schema_attention_scores=tf.float32, schema_attention_context=tf.float32) def initialize(self, name=None): (finished, first_inputs) = self.helper.initialize() attention_context = tf.zeros([tf.shape(first_inputs)[0], self.attention_values.get_shape().as_list()[(- 1)]]) schema_attention_context = tf.zeros([tf.shape(first_inputs)[0], self.schema_attention_values.get_shape().as_list()[(- 1)]]) first_inputs = tf.concat([first_inputs, attention_context, schema_attention_context], 1) return (finished, first_inputs, self.initial_state) def compute_output(self, cell_output, calculate_softmax=True): 'Computes the decoder outputs.' 
(att_scores, attention_context) = self.attention_fn(query=cell_output, keys=self.attention_keys, values=self.attention_values, values_length=self.attention_values_length) (schema_att_scores, schema_attention_context) = self.schema_attention_fn(query=cell_output, keys=self.schema_attention_keys, values=self.schema_attention_values, values_length=self.schema_attention_values_length) softmax_input = None logits = None if calculate_softmax: (softmax_input, logits) = self._calculate_softmax([cell_output, attention_context, schema_attention_context]) return (softmax_input, logits, att_scores, attention_context, schema_att_scores, schema_attention_context) def _calculate_softmax(self, list_of_contexts): softmax_input = tf.contrib.layers.fully_connected(inputs=tf.concat(list_of_contexts, 1), num_outputs=self.cell.output_size, activation_fn=tf.nn.tanh, scope='attention_mix') logits = tf.contrib.layers.fully_connected(inputs=softmax_input, num_outputs=self.vocab_size, activation_fn=None, scope='logits') return (softmax_input, logits) def _setup(self, initial_state, helper): self.initial_state = initial_state def att_next_inputs(time, outputs, state, sample_ids, name=None): 'Wraps the original decoder helper function to append the attention\n context.\n ' (finished, next_inputs, next_state) = helper.next_inputs(time=time, outputs=outputs, state=state, sample_ids=sample_ids, name=name) next_inputs = tf.concat([next_inputs, outputs.attention_context, outputs.schema_attention_context], 1) return (finished, next_inputs, next_state) self.helper = CustomHelper(initialize_fn=helper.initialize, sample_fn=helper.sample, next_inputs_fn=att_next_inputs) def step(self, time_, inputs, state, name=None): (cell_output, cell_state) = self.cell(inputs, state) (cell_output_new, logits, attention_scores, attention_context, schema_attention_scores, schema_attention_context) = self.compute_output(cell_output) if (self.reverse_scores_lengths is not None): attention_scores = tf.reverse_sequence(input=attention_scores, seq_lengths=self.reverse_scores_lengths, seq_dim=1, batch_dim=0) sample_ids = self.helper.sample(time=time_, outputs=logits, state=cell_state) outputs = SchemaAttentionDecoderOutput(logits=logits, predicted_ids=sample_ids, cell_output=cell_output_new, attention_scores=attention_scores, attention_context=attention_context, schema_attention_scores=schema_attention_scores, schema_attention_context=schema_attention_context) (finished, next_inputs, next_state) = self.helper.next_inputs(time=time_, outputs=outputs, state=cell_state, sample_ids=sample_ids) return (outputs, next_state, next_inputs, finished)
class SchemaAttentionCopyingDecoder(SchemaAttentionDecoder): '\n The version of SchemaAttentionCopyingDecoder that uses\n F(score_n, rowembedding_n, h, c, W) to generate a score for the\n n-th field in the schema.\n ' def __init__(self, params, mode, vocab_size, attention_keys, attention_values, attention_values_length, attention_fn, reverse_scores_lengths=None, schema_attention_keys=None, schema_attention_values=None, schema_attention_values_length=None, schema_attention_fn=None, name='schema_attention_copying_decoder'): super(SchemaAttentionCopyingDecoder, self).__init__(params, mode, vocab_size, attention_keys, attention_values, attention_values_length, attention_fn, reverse_scores_lengths, schema_attention_keys, schema_attention_values, schema_attention_values_length, schema_attention_fn, name) self.schema_embs = schema_attention_values @property def output_size(self): return SchemaCopyingAttentionDecoderOutput(logits=self.vocab_size, predicted_ids=tf.TensorShape([]), cell_output=self.cell.output_size, attention_scores=tf.shape(self.attention_values)[1:(- 1)], attention_context=self.attention_values.get_shape()[(- 1)], schema_attention_scores=tf.shape(self.schema_attention_values)[1:(- 1)], schema_attention_context=self.schema_attention_values.get_shape()[(- 1)], schema_attention_copy_vals=tf.shape(self.schema_attention_values)[1:(- 1)]) @property def output_dtype(self): return SchemaCopyingAttentionDecoderOutput(logits=tf.float32, predicted_ids=tf.int32, cell_output=tf.float32, attention_scores=tf.float32, attention_context=tf.float32, schema_attention_scores=tf.float32, schema_attention_context=tf.float32, schema_attention_copy_vals=tf.float32) def compute_output(self, cell_output): (softmax_input, logits, att_scores, attention_context, schema_att_scores, schema_attention_context) = super(SchemaAttentionCopyingDecoder, self).compute_output(cell_output) schema_attention_copy_vals = schema_att_scores weighted_schema_embs_size = (self.cell.output_size + self.attention_values.get_shape().as_list()[(- 1)]) weighted_schema_embs = tf.contrib.layers.fully_connected(inputs=self.schema_embs, num_outputs=weighted_schema_embs_size, activation_fn=None, scope='weighted_schema_embs') concatenated = tf.expand_dims(tf.concat([cell_output, attention_context], 1), axis=2) schema_attention_copy_vals = (schema_att_scores * tf.squeeze(tf.matmul(weighted_schema_embs, concatenated), axis=2)) return (softmax_input, logits, att_scores, attention_context, schema_att_scores, schema_attention_context, schema_attention_copy_vals) def _setup(self, initial_state, helper): self.initial_state = initial_state def att_next_inputs(time, outputs, state, sample_ids, name=None): 'Wraps the original decoder helper function to append the attention\n context.\n ' (finished, next_inputs, next_state) = helper.next_inputs(time=time, outputs=outputs, state=state, sample_ids=sample_ids, name=name) next_inputs = tf.concat([next_inputs, outputs.attention_context, outputs.schema_attention_context], 1) return (finished, next_inputs, next_state) self.helper = CustomHelper(initialize_fn=helper.initialize, sample_fn=helper.sample, next_inputs_fn=att_next_inputs) def step(self, time_, inputs, state, name=None): (cell_output, cell_state) = self.cell(inputs, state) (cell_output_new, logits, attention_scores, attention_context, schema_attention_scores, schema_attention_context, schema_attention_copy_vals) = self.compute_output(cell_output) if (self.reverse_scores_lengths is not None): attention_scores = tf.reverse_sequence(input=attention_scores, seq_lengths=self.reverse_scores_lengths, seq_dim=1, batch_dim=0) sample_ids = self.helper.sample(time=time_, outputs=logits, state=cell_state) outputs = SchemaCopyingAttentionDecoderOutput(logits=logits, predicted_ids=sample_ids, cell_output=cell_output_new, attention_scores=attention_scores, attention_context=attention_context, schema_attention_scores=schema_attention_scores, schema_attention_context=schema_attention_context, schema_attention_copy_vals=schema_attention_copy_vals) (finished, next_inputs, next_state) = self.helper.next_inputs(time=time_, outputs=outputs, state=cell_state, sample_ids=sample_ids) return (outputs, next_state, next_inputs, finished)
class SchemaMapAttentionDecoder(SchemaAttentionDecoder): 'An RNN Decoder that uses attention over an input sequence and a schema\n and a schema map.\n\n Args:\n cell: An instance of `tf.contrib.rnn.RNNCell`\n helper: An instance of `tf.contrib.seq2seq.Helper` to assist decoding\n initial_state: A tensor or tuple of tensors used as the initial cell\n state.\n vocab_size: Output vocabulary size, i.e. number of units\n in the softmax layer\n attention_keys: The sequence used to calculate attention scores.\n A tensor of shape `[B, T, ...]`.\n attention_values: The sequence to attend over.\n A tensor of shape `[B, T, input_dim]`.\n attention_values_length: Sequence length of the attention values.\n An int32 Tensor of shape `[B]`.\n attention_fn: The attention function to use. This function maps from\n `(state, inputs)` to `(attention_scores, attention_context)`.\n For an example, see `seq2seq.decoder.attention.AttentionLayer`.\n reverse_scores_lengths: Optional, an array of sequence lengths. If set,\n reverse the attention scores in the output. This is used when\n a reversed source sequence is fed as an input but you want to\n return the scores in non-reversed order.\n ' def __init__(self, params, mode, vocab_size, attention_keys, attention_values, attention_values_length, attention_fn, reverse_scores_lengths=None, schema_attention_keys=None, schema_attention_values=None, schema_attention_values_length=None, schema_attention_fn=None, schema_map_attention_keys=None, schema_map_attention_values=None, schema_map_attention_values_length=None, schema_map_attention_fn=None, name='schema_map_attention_decoder'): super(SchemaMapAttentionDecoder, self).__init__(params, mode, vocab_size, attention_keys, attention_values, attention_values_length, attention_fn, reverse_scores_lengths, schema_attention_keys, schema_attention_values, schema_attention_values_length, schema_attention_fn, name) self.schema_map_attention_keys = schema_map_attention_keys self.schema_map_attention_values = schema_map_attention_values self.schema_map_attention_values_length = schema_map_attention_values_length if schema_map_attention_fn: self.schema_map_attention_fn = schema_map_attention_fn else: self.schema_map_attention_fn = attention_fn @property def output_size(self): return SchemaMapAttentionDecoderOutput(logits=self.vocab_size, predicted_ids=tf.TensorShape([]), cell_output=self.cell.output_size, attention_scores=tf.shape(self.attention_values)[1:(- 1)], attention_context=self.attention_values.get_shape()[(- 1)], schema_attention_scores=tf.shape(self.schema_attention_values)[1:(- 1)], schema_attention_context=self.schema_attention_values.get_shape()[(- 1)], schema_map_attention_scores=tf.shape(self.schema_map_attention_values)[1:(- 1)], schema_map_attention_context=self.schema_map_attention_values.get_shape()[(- 1)]) @property def output_dtype(self): return SchemaMapAttentionDecoderOutput(logits=tf.float32, predicted_ids=tf.int32, cell_output=tf.float32, attention_scores=tf.float32, attention_context=tf.float32, schema_attention_scores=tf.float32, schema_attention_context=tf.float32, schema_map_attention_scores=tf.float32, schema_map_attention_context=tf.float32) def initialize(self, name=None): (finished, first_inputs, initial_state) = super(SchemaMapAttentionDecoder, self).initialize(name=name) schema_map_attention_context = tf.zeros([tf.shape(first_inputs)[0], self.schema_map_attention_values.get_shape().as_list()[(- 1)]]) first_inputs = tf.concat([first_inputs, schema_map_attention_context], 1) return (finished, first_inputs, initial_state) 
def compute_output(self, cell_output, calculate_softmax=True): (softmax_input, logits, att_scores, attention_context, schema_att_scores, schema_attention_context) = super(SchemaMapAttentionDecoder, self).compute_output(cell_output, calculate_softmax=False) with tf.variable_scope('schema_map_att'): (schema_map_att_scores, schema_map_attention_context) = self.schema_map_attention_fn(query=cell_output, keys=self.schema_map_attention_keys, values=self.schema_map_attention_values, values_length=self.schema_map_attention_values_length) if calculate_softmax: (softmax_input, logits) = self._calculate_softmax([cell_output, attention_context, schema_attention_context, schema_map_attention_context]) return (softmax_input, logits, att_scores, attention_context, schema_att_scores, schema_attention_context, schema_map_att_scores, schema_map_attention_context) def _setup(self, initial_state, helper): self.initial_state = initial_state def att_next_inputs(time, outputs, state, sample_ids, name=None): 'Wraps the original decoder helper function to append the attention\n context.\n ' (finished, next_inputs, next_state) = helper.next_inputs(time=time, outputs=outputs, state=state, sample_ids=sample_ids, name=name) next_inputs = tf.concat([next_inputs, outputs.attention_context, outputs.schema_attention_context, outputs.schema_map_attention_context], 1) return (finished, next_inputs, next_state) self.helper = CustomHelper(initialize_fn=helper.initialize, sample_fn=helper.sample, next_inputs_fn=att_next_inputs) def step(self, time_, inputs, state, name=None): (cell_output, cell_state) = self.cell(inputs, state) (cell_output_new, logits, attention_scores, attention_context, schema_attention_scores, schema_attention_context, schema_map_attention_scores, schema_map_attention_context) = self.compute_output(cell_output) if (self.reverse_scores_lengths is not None): attention_scores = tf.reverse_sequence(input=attention_scores, seq_lengths=self.reverse_scores_lengths, seq_dim=1, batch_dim=0) sample_ids = self.helper.sample(time=time_, outputs=logits, state=cell_state) outputs = SchemaMapAttentionDecoderOutput(logits=logits, predicted_ids=sample_ids, cell_output=cell_output_new, attention_scores=attention_scores, attention_context=attention_context, schema_attention_scores=schema_attention_scores, schema_attention_context=schema_attention_context, schema_map_attention_scores=schema_map_attention_scores, schema_map_attention_context=schema_map_attention_context) (finished, next_inputs, next_state) = self.helper.next_inputs(time=time_, outputs=outputs, state=cell_state, sample_ids=sample_ids) return (outputs, next_state, next_inputs, finished)
class ConvEncoder(Encoder): 'A deep convolutional encoder, as described in\n https://arxiv.org/abs/1611.02344. The encoder supports optional position\n embeddings.\n\n Params:\n attention_cnn.units: Number of units in `cnn_a`. Same in each layer.\n attention_cnn.kernel_size: Kernel size for `cnn_a`.\n attention_cnn.layers: Number of layers in `cnn_a`.\n embedding_dropout_keep_prob: Dropout keep probability\n applied to the embeddings.\n output_cnn.units: Number of units in `cnn_c`. Same in each layer.\n output_cnn.kernel_size: Kernel size for `cnn_c`.\n output_cnn.layers: Number of layers in `cnn_c`.\n position_embeddings.enable: If true, add position embeddings to the\n inputs before pooling.\n position_embeddings.combiner_fn: Function used to combine the\n position embeddings with the inputs. For example, `tensorflow.add`.\n position_embeddings.num_positions: Size of the position embedding matrix.\n This should be set to the maximum sequence length of the inputs.\n ' def __init__(self, params, mode, name='conv_encoder'): super(ConvEncoder, self).__init__(params, mode, name) self._combiner_fn = locate(self.params['position_embeddings.combiner_fn']) @staticmethod def default_params(): return {'attention_cnn.units': 512, 'attention_cnn.kernel_size': 3, 'attention_cnn.layers': 15, 'embedding_dropout_keep_prob': 0.8, 'output_cnn.units': 256, 'output_cnn.kernel_size': 3, 'output_cnn.layers': 5, 'position_embeddings.enable': True, 'position_embeddings.combiner_fn': 'tensorflow.multiply', 'position_embeddings.num_positions': 100} def encode(self, inputs, sequence_length): if self.params['position_embeddings.enable']: positions_embed = _create_position_embedding(embedding_dim=inputs.get_shape().as_list()[(- 1)], num_positions=self.params['position_embeddings.num_positions'], lengths=sequence_length, maxlen=tf.shape(inputs)[1]) inputs = self._combiner_fn(inputs, positions_embed) inputs = tf.contrib.layers.dropout(inputs=inputs, keep_prob=self.params['embedding_dropout_keep_prob'], is_training=(self.mode == tf.contrib.learn.ModeKeys.TRAIN)) with tf.variable_scope('cnn_a'): cnn_a_output = inputs for layer_idx in range(self.params['attention_cnn.layers']): next_layer = tf.contrib.layers.conv2d(inputs=cnn_a_output, num_outputs=self.params['attention_cnn.units'], kernel_size=self.params['attention_cnn.kernel_size'], padding='SAME', activation_fn=None) if (layer_idx > 0): next_layer += cnn_a_output cnn_a_output = tf.tanh(next_layer) with tf.variable_scope('cnn_c'): cnn_c_output = inputs for layer_idx in range(self.params['output_cnn.layers']): next_layer = tf.contrib.layers.conv2d(inputs=cnn_c_output, num_outputs=self.params['output_cnn.units'], kernel_size=self.params['output_cnn.kernel_size'], padding='SAME', activation_fn=None) if (layer_idx > 0): next_layer += cnn_c_output cnn_c_output = tf.tanh(next_layer) final_state = tf.reduce_mean(cnn_c_output, 1) return EncoderOutput(outputs=cnn_a_output, final_state=final_state, attention_values=cnn_c_output, attention_values_length=sequence_length)
@six.add_metaclass(abc.ABCMeta) class Encoder(GraphModule, Configurable): 'Abstract encoder class. All encoders should inherit from this.\n\n Args:\n params: A dictionary of hyperparameters for the encoder.\n name: A variable scope for the encoder graph.\n ' def __init__(self, params, mode, name): GraphModule.__init__(self, name) Configurable.__init__(self, params, mode) def _build(self, inputs, *args, **kwargs): return self.encode(inputs, *args, **kwargs) @abc.abstractmethod def encode(self, *args, **kwargs): '\n Encodes an input sequence.\n\n Args:\n inputs: The inputs to encode. A float32 tensor of shape [B, T, ...].\n sequence_length: The length of each input. An int32 tensor of shape [B].\n\n Returns:\n An `EncoderOutput` tuple containing the outputs and final state.\n ' raise NotImplementedError
class InceptionV3Encoder(Encoder): '\n An image encoder that feeds the (resized) input images through an\n InceptionV3 network and returns the flattened convolutional feature maps.\n\n Params:\n resize_height: Resize the image to this height before feeding it\n into the convolutional network.\n resize_width: Resize the image to this width before feeding it\n into the convolutional network.\n ' def __init__(self, params, mode, name='image_encoder'): super(InceptionV3Encoder, self).__init__(params, mode, name) @staticmethod def default_params(): return {'resize_height': 299, 'resize_width': 299} def encode(self, inputs): inputs = tf.image.resize_images(images=inputs, size=[self.params['resize_height'], self.params['resize_width']], method=tf.image.ResizeMethod.BILINEAR) (outputs, _) = inception_v3_base(tf.to_float(inputs)) output_shape = outputs.get_shape() shape_list = output_shape.as_list() outputs_flat = tf.reshape(outputs, [shape_list[0], (- 1), shape_list[(- 1)]]) final_state = tf.contrib.slim.avg_pool2d(outputs, output_shape[1:3], padding='VALID', scope='pool') final_state = tf.contrib.slim.flatten(final_state, scope='flatten') return EncoderOutput(outputs=outputs_flat, final_state=final_state, attention_values=outputs_flat, attention_values_length=tf.shape(outputs_flat)[1])
def _unpack_cell(cell): 'Unpack the cells because the stack_bidirectional_dynamic_rnn\n expects a list of cells, one per layer.' if isinstance(cell, tf.contrib.rnn.MultiRNNCell): return cell._cells else: return [cell]
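For example, a two-layer MultiRNNCell unpacks into its constituent cells, while a plain cell comes back as a singleton list:

cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(64) for _ in range(2)])
assert len(_unpack_cell(cell)) == 2
assert len(_unpack_cell(tf.contrib.rnn.BasicLSTMCell(64))) == 1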
def _default_rnn_cell_params(): 'Creates default parameters used by multiple RNN encoders.\n ' return {'cell_class': 'BasicLSTMCell', 'cell_params': {'num_units': 128}, 'dropout_input_keep_prob': 1.0, 'dropout_output_keep_prob': 1.0, 'num_layers': 1, 'residual_connections': False, 'residual_combiner': 'add', 'residual_dense': False}
def _toggle_dropout(cell_params, mode): 'Disables dropout during eval/inference mode\n ' cell_params = copy.deepcopy(cell_params) if (mode != tf.contrib.learn.ModeKeys.TRAIN): cell_params['dropout_input_keep_prob'] = 1.0 cell_params['dropout_output_keep_prob'] = 1.0 return cell_params
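Because the params are deep-copied, toggling dropout for eval leaves the training configuration untouched:

params = {'cell_class': 'BasicLSTMCell', 'dropout_input_keep_prob': 0.5, 'dropout_output_keep_prob': 0.5}
eval_params = _toggle_dropout(params, tf.contrib.learn.ModeKeys.EVAL)
assert eval_params['dropout_input_keep_prob'] == 1.0
assert params['dropout_input_keep_prob'] == 0.5  # original dict is unchanged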
class UnidirectionalRNNEncoder(Encoder): '\n A unidirectional RNN encoder. Stacking should be performed as\n part of the cell.\n\n Args:\n cell: An instance of tf.contrib.rnn.RNNCell\n name: A name for the encoder\n ' def __init__(self, params, mode, name='forward_rnn_encoder'): super(UnidirectionalRNNEncoder, self).__init__(params, mode, name) self.params['rnn_cell'] = _toggle_dropout(self.params['rnn_cell'], mode) @staticmethod def default_params(): return {'rnn_cell': _default_rnn_cell_params(), 'init_scale': 0.04} def encode(self, inputs, sequence_length, **kwargs): scope = tf.get_variable_scope() scope.set_initializer(tf.random_uniform_initializer((- self.params['init_scale']), self.params['init_scale'])) cell = training_utils.get_rnn_cell(**self.params['rnn_cell']) (outputs, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputs, sequence_length=sequence_length, dtype=tf.float32, **kwargs) return EncoderOutput(outputs=outputs, final_state=state, attention_values=outputs, attention_values_length=sequence_length)
class BidirectionalRNNEncoder(Encoder): '\n A bidirectional RNN encoder. Uses the same cell for both the\n forward and backward RNN. Stacking should be performed as part of\n the cell.\n\n Args:\n cell: An instance of tf.contrib.rnn.RNNCell\n name: A name for the encoder\n ' def __init__(self, params, mode, name='bidi_rnn_encoder'): super(BidirectionalRNNEncoder, self).__init__(params, mode, name) self.params['rnn_cell'] = _toggle_dropout(self.params['rnn_cell'], mode) @staticmethod def default_params(): return {'rnn_cell': _default_rnn_cell_params(), 'init_scale': 0.04} def encode(self, inputs, sequence_length, **kwargs): scope = tf.get_variable_scope() scope.set_initializer(tf.random_uniform_initializer((- self.params['init_scale']), self.params['init_scale'])) cell_fw = training_utils.get_rnn_cell(**self.params['rnn_cell']) cell_bw = training_utils.get_rnn_cell(**self.params['rnn_cell']) (outputs, states) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw, cell_bw=cell_bw, inputs=inputs, sequence_length=sequence_length, dtype=tf.float32, **kwargs) outputs_concat = tf.concat(outputs, 2) return EncoderOutput(outputs=outputs_concat, final_state=states, attention_values=outputs_concat, attention_values_length=sequence_length)
class StackBidirectionalRNNEncoder(Encoder): '\n A stacked bidirectional RNN encoder. Uses the same cell for both the\n forward and backward RNN. Stacking should be performed as part of\n the cell.\n\n Args:\n cell: An instance of tf.contrib.rnn.RNNCell\n name: A name for the encoder\n ' def __init__(self, params, mode, name='stacked_bidi_rnn_encoder'): super(StackBidirectionalRNNEncoder, self).__init__(params, mode, name) self.params['rnn_cell'] = _toggle_dropout(self.params['rnn_cell'], mode) @staticmethod def default_params(): return {'rnn_cell': _default_rnn_cell_params(), 'init_scale': 0.04} def encode(self, inputs, sequence_length, **kwargs): scope = tf.get_variable_scope() scope.set_initializer(tf.random_uniform_initializer((- self.params['init_scale']), self.params['init_scale'])) cell_fw = training_utils.get_rnn_cell(**self.params['rnn_cell']) cell_bw = training_utils.get_rnn_cell(**self.params['rnn_cell']) cells_fw = _unpack_cell(cell_fw) cells_bw = _unpack_cell(cell_bw) result = rnn.stack_bidirectional_dynamic_rnn(cells_fw=cells_fw, cells_bw=cells_bw, inputs=inputs, dtype=tf.float32, sequence_length=sequence_length, **kwargs) (outputs_concat, _output_state_fw, _output_state_bw) = result final_state = (_output_state_fw, _output_state_bw) return EncoderOutput(outputs=outputs_concat, final_state=final_state, attention_values=outputs_concat, attention_values_length=sequence_length)
class GraphModule(object): '\n Convenience class that makes it easy to share variables.\n Each instance of this class creates its own set of variables, but\n each subsequent execution of an instance will re-use its variables.\n\n Graph components that define variables should inherit from this class\n and implement their logic in the `_build` method.\n ' def __init__(self, name): '\n Initialize the module. Each subclass must call this constructor with a name.\n\n Args:\n name: Name of this module. Used for `tf.make_template`.\n ' self.name = name self._template = tf.make_template(name, self._build, create_scope_now_=True) self.__doc__ = self._build.__doc__ self.__call__.__func__.__doc__ = self._build.__doc__ def _build(self, *args, **kwargs): 'Subclasses should implement their logic here.\n ' raise NotImplementedError def __call__(self, *args, **kwargs): return self._template(*args, **kwargs) def variable_scope(self): 'Returns the proper variable scope for this module.\n ' return tf.variable_scope(self._template.variable_scope)
def templatemethod(name_): 'This decorator wraps a method with `tf.make_template`. For example,\n\n @templatemethod("my_method")\n def my_method():\n # Create variables\n ' def template_decorator(func): 'Inner decorator function' def func_wrapper(*args, **kwargs): 'Inner wrapper function' templated_func = tf.make_template(name_, func) return templated_func(*args, **kwargs) return func_wrapper return template_decorator
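Both GraphModule and templatemethod lean on tf.make_template, which creates variables on the first call and transparently reuses them on every later call. A minimal sketch of the sharing behaviour (the Projection module is hypothetical):

class Projection(GraphModule):
    def __init__(self, name='projection'):
        super(Projection, self).__init__(name)
    def _build(self, inputs):
        return tf.contrib.layers.fully_connected(inputs, 10)

proj = Projection()
a = proj(tf.zeros([4, 8]))  # creates the weights
b = proj(tf.ones([4, 8]))   # reuses the same weights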
def add_dict_to_collection(dict_, collection_name): 'Adds a dictionary to a graph collection.\n\n Args:\n dict_: A dictionary of string keys to tensor values\n collection_name: The name of the collection to add the dictionary to\n ' key_collection = (collection_name + '_keys') value_collection = (collection_name + '_values') for (key, value) in dict_.items(): tf.add_to_collection(key_collection, key) tf.add_to_collection(value_collection, value)
def get_dict_from_collection(collection_name): 'Gets a dictionary from a graph collection.\n\n Args:\n collection_name: A collection name to read a dictionary from\n\n Returns:\n A dictionary with string keys and tensor values\n ' key_collection = (collection_name + '_keys') value_collection = (collection_name + '_values') keys = tf.get_collection(key_collection) values = tf.get_collection(value_collection) return dict(zip(keys, values))
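These two helpers give a simple round trip for recovering named tensors from a restored graph, e.g.:

tensors = {'predictions': tf.constant([1, 2, 3])}
add_dict_to_collection(tensors, 'model_outputs')
# Later, typically after re-importing the graph from a checkpoint:
restored = get_dict_from_collection('model_outputs')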
def create_inference_graph(model, input_pipeline, batch_size=32): 'Creates a graph to perform inference.\n\n Args:\n model: The model instance used to build the inference graph.\n input_pipeline: An instance of `InputPipeline` that defines\n how to read and parse data.\n batch_size: The batch size used for inference\n\n Returns:\n The return value of the model function, typically a tuple of\n (predictions, loss, train_op).\n ' if hasattr(model, 'use_beam_search'): if model.use_beam_search: tf.logging.info('Setting batch size to 1 for beam search.') batch_size = 1 input_fn = training_utils.create_input_fn(pipeline=input_pipeline, batch_size=batch_size, allow_smaller_final_batch=True) (features, labels) = input_fn() return model(features=features, labels=labels, params=None)
def cross_entropy_sequence_loss(logits, targets, sequence_length): 'Calculates the per-example cross-entropy loss for a sequence of logits and\n masks out all losses past the sequence length.\n\n Args:\n logits: Logits of shape `[T, B, vocab_size]`\n targets: Target classes of shape `[T, B]`\n sequence_length: An int32 tensor of shape `[B]` corresponding\n to the length of each input\n\n Returns:\n A tensor of shape [T, B] that contains the loss per example, per time step.\n ' with tf.name_scope('cross_entropy_sequence_loss'): losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets) loss_mask = tf.sequence_mask(tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0])) losses = (losses * tf.transpose(tf.to_float(loss_mask), [1, 0])) return losses
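Note the time-major `[T, B]` layout: tf.sequence_mask produces a `[B, T]` mask, hence the transpose before multiplying. A usage sketch with illustrative shapes:

T, B, V = 10, 4, 50
logits = tf.random_normal([T, B, V])
targets = tf.zeros([T, B], dtype=tf.int32)
lengths = tf.constant([10, 7, 3, 9])
losses = cross_entropy_sequence_loss(logits, targets, lengths)  # [T, B]
# Typical reduction: sum over time steps, then average over the batch.
loss = tf.reduce_sum(losses) / tf.to_float(B)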
def moses_multi_bleu(hypotheses, references, lowercase=False): 'Calculate the BLEU score for hypotheses and references\n using the Moses multi-bleu.perl script.\n\n Args:\n hypotheses: A numpy array of strings where each string is a single example.\n references: A numpy array of strings where each string is a single example.\n lowercase: If true, pass the "-lc" flag to the multi-bleu script\n\n Returns:\n The BLEU score as a float32 value.\n ' if (np.size(hypotheses) == 0): return np.float32(0.0) try: (multi_bleu_path, _) = urllib.request.urlretrieve('https://raw.githubusercontent.com/moses-smt/mosesdecoder/master/scripts/generic/multi-bleu.perl') os.chmod(multi_bleu_path, 0o755) except Exception: tf.logging.info('Unable to fetch multi-bleu.perl script, using local.') metrics_dir = os.path.dirname(os.path.realpath(__file__)) bin_dir = os.path.abspath(os.path.join(metrics_dir, '..', '..', 'bin')) multi_bleu_path = os.path.join(bin_dir, 'tools/multi-bleu.perl') hypothesis_file = tempfile.NamedTemporaryFile() hypothesis_file.write('\n'.join(hypotheses).encode('utf-8')) hypothesis_file.write(b'\n') hypothesis_file.flush() reference_file = tempfile.NamedTemporaryFile() reference_file.write('\n'.join(references).encode('utf-8')) reference_file.write(b'\n') reference_file.flush() with open(hypothesis_file.name, 'r') as read_pred: bleu_cmd = [multi_bleu_path] if lowercase: bleu_cmd += ['-lc'] bleu_cmd += [reference_file.name] try: bleu_out = subprocess.check_output(bleu_cmd, stdin=read_pred, stderr=subprocess.STDOUT) bleu_out = bleu_out.decode('utf-8') bleu_score = re.search('BLEU = (.+?),', bleu_out).group(1) bleu_score = float(bleu_score) except subprocess.CalledProcessError as error: if (error.output is not None): tf.logging.warning('multi-bleu.perl script returned non-zero exit code') tf.logging.warning(error.output) bleu_score = np.float32(0.0) hypothesis_file.close() reference_file.close() return np.float32(bleu_score)
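Usage sketch (toy sentences; the function shells out to the downloaded or bundled multi-bleu.perl script):

hypotheses = np.array(['the cat sat on the mat'])
references = np.array(['the cat sat on a mat'])
print(moses_multi_bleu(hypotheses, references, lowercase=True))  # a float32 BLEU score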
def accumulate_strings(values, name='strings'): 'Accumulates strings into a vector.\n\n Args:\n values: A 1-d string tensor that contains values to add to the accumulator.\n\n Returns:\n A tuple (value_tensor, update_op).\n ' tf.assert_type(values, tf.string) strings = tf.Variable(name=name, initial_value=[], dtype=tf.string, trainable=False, collections=[], validate_shape=True) value_tensor = tf.identity(strings) update_op = tf.assign(ref=strings, value=tf.concat([strings, values], 0), validate_shape=False) return (value_tensor, update_op)
@six.add_metaclass(abc.ABCMeta) class TextMetricSpec(Configurable, MetricSpec): 'Abstract class for text-based metrics calculated based on\n hypotheses and references. Subclasses must implement `metric_fn`.\n\n Args:\n name: A name for the metric\n separator: A separator used to join predicted tokens. Defaults to space.\n eos_token: A string token used to find the end of a sequence. Hypotheses\n and references will be sliced until this token is found.\n ' def __init__(self, params, name): 'Initializer' Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.EVAL) self._name = name self._eos_token = self.params['eos_token'] self._sos_token = self.params['sos_token'] self._separator = self.params['separator'] self._postproc_fn = None if self.params['postproc_fn']: self._postproc_fn = locate(self.params['postproc_fn']) if (self._postproc_fn is None): raise ValueError('postproc_fn not found: {}'.format(self.params['postproc_fn'])) @property def name(self): 'Name of the metric' return self._name @staticmethod def default_params(): return {'sos_token': 'SEQUENCE_START', 'eos_token': 'SEQUENCE_END', 'separator': ' ', 'postproc_fn': ''} def create_metric_ops(self, _inputs, labels, predictions): 'Creates (value, update_op) tensors\n ' with tf.variable_scope(self._name): predictions_flat = tf.reduce_join(predictions['predicted_tokens'], 1, separator=self._separator) labels_flat = tf.reduce_join(labels['target_tokens'], 1, separator=self._separator) (sources_value, sources_update) = accumulate_strings(values=predictions_flat, name='sources') (targets_value, targets_update) = accumulate_strings(values=labels_flat, name='targets') metric_value = tf.py_func(func=self._py_func, inp=[sources_value, targets_value], Tout=tf.float32, name='value') with tf.control_dependencies([sources_update, targets_update]): update_op = tf.identity(metric_value, name='update_op') return (metric_value, update_op) def _py_func(self, hypotheses, references): 'Wrapper function that converts tensors to unicode and slices\n them until the EOS token is found.\n ' if (hypotheses.dtype.kind == np.dtype('U')): hypotheses = np.char.encode(hypotheses, 'utf-8') if (references.dtype.kind == np.dtype('U')): references = np.char.encode(references, 'utf-8') hypotheses = [_.decode('utf-8') for _ in hypotheses] references = [_.decode('utf-8') for _ in references] sliced_hypotheses = [postproc.slice_text(_, self._eos_token, self._sos_token) for _ in hypotheses] sliced_references = [postproc.slice_text(_, self._eos_token, self._sos_token) for _ in references] if self._postproc_fn: sliced_hypotheses = [self._postproc_fn(_) for _ in sliced_hypotheses] sliced_references = [self._postproc_fn(_) for _ in sliced_references] return self.metric_fn(sliced_hypotheses, sliced_references) def metric_fn(self, hypotheses, references): 'Calculates the value of the metric.\n\n Args:\n hypotheses: A python list of strings, each corresponding to a\n single hypothesis/example.\n references: A python list of strings, each corresponding to a single\n reference. Must have the same number of elements as `hypotheses`.\n\n Returns:\n A float value.\n ' raise NotImplementedError()
class BleuMetricSpec(TextMetricSpec): 'Calculates BLEU score using the Moses multi-bleu.perl script.\n ' def __init__(self, params): super(BleuMetricSpec, self).__init__(params, 'bleu') def metric_fn(self, hypotheses, references): return bleu.moses_multi_bleu(hypotheses, references, lowercase=False)
class RougeMetricSpec(TextMetricSpec): 'Calculates a ROUGE metric of the configured `rouge_type` using the\n pure-Python ROUGE implementation in this module.\n ' def __init__(self, params, **kwargs): if (not params['rouge_type']): raise ValueError('You must provide a rouge_type for ROUGE') super(RougeMetricSpec, self).__init__(params, params['rouge_type'], **kwargs) self._rouge_type = self.params['rouge_type'] @staticmethod def default_params(): params = TextMetricSpec.default_params() params.update({'rouge_type': ''}) return params def metric_fn(self, hypotheses, references): if ((not hypotheses) or (not references)): return np.float32(0.0) return np.float32(rouge.rouge(hypotheses, references)[self._rouge_type])
class LogPerplexityMetricSpec(MetricSpec, Configurable): 'A MetricSpec to calculate streaming log perplexity' def __init__(self, params): 'Initializer' Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.EVAL) @staticmethod def default_params(): return {} @property def name(self): 'Name of the metric' return 'log_perplexity' def create_metric_ops(self, _inputs, labels, predictions): 'Creates the metric op' loss_mask = tf.sequence_mask(lengths=tf.to_int32((labels['target_len'] - 1)), maxlen=tf.to_int32(tf.shape(predictions['losses'])[1])) return metrics.streaming_mean(predictions['losses'], loss_mask)
def _get_ngrams(n, text): 'Calculates n-grams.\n\n Args:\n n: which n-grams to calculate\n text: An array of tokens\n\n Returns:\n A set of n-grams\n ' ngram_set = set() text_length = len(text) max_index_ngram_start = (text_length - n) for i in range((max_index_ngram_start + 1)): ngram_set.add(tuple(text[i:(i + n)])) return ngram_set
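A small illustration of _get_ngrams on a token list; note the result is a set, so repeated n-grams collapse to one entry:

tokens = ['the', 'cat', 'sat', 'on', 'the', 'mat']
print(_get_ngrams(2, tokens))
# {('the', 'cat'), ('cat', 'sat'), ('sat', 'on'), ('on', 'the'), ('the', 'mat')}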
def _split_into_words(sentences): 'Splits multiple sentences into words and flattens the result' return list(itertools.chain(*[_.split(' ') for _ in sentences]))
def _get_word_ngrams(n, sentences): 'Calculates word n-grams for multiple sentences.\n ' assert (len(sentences) > 0) assert (n > 0) words = _split_into_words(sentences) return _get_ngrams(n, words)
def _len_lcs(x, y): '\n Returns the length of the Longest Common Subsequence between sequences x\n and y.\n Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n\n Args:\n x: sequence of words\n y: sequence of words\n\n Returns:\n integer: Length of LCS between x and y\n ' table = _lcs(x, y) (n, m) = (len(x), len(y)) return table[(n, m)]
def _lcs(x, y): '\n Computes the length of the longest common subsequence (LCS) between two\n sequences. The implementation below uses a dynamic programming algorithm and\n runs in O(nm) time where n = len(x) and m = len(y).\n Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n\n Args:\n x: collection of words\n y: collection of words\n\n Returns:\n Table (dict) mapping each coordinate (i, j) to the LCS length of\n x[:i] and y[:j]\n ' (n, m) = (len(x), len(y)) table = dict() for i in range((n + 1)): for j in range((m + 1)): if ((i == 0) or (j == 0)): table[(i, j)] = 0 elif (x[(i - 1)] == y[(j - 1)]): table[(i, j)] = (table[((i - 1), (j - 1))] + 1) else: table[(i, j)] = max(table[((i - 1), j)], table[(i, (j - 1))]) return table
def _recon_lcs(x, y): '\n Returns the Longest Common Subsequence between x and y.\n Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n\n Args:\n x: sequence of words\n y: sequence of words\n\n Returns:\n sequence: LCS of x and y\n ' (i, j) = (len(x), len(y)) table = _lcs(x, y) def _recon(i, j): 'private recon calculation' if ((i == 0) or (j == 0)): return [] elif (x[(i - 1)] == y[(j - 1)]): return (_recon((i - 1), (j - 1)) + [(x[(i - 1)], i)]) elif (table[((i - 1), j)] > table[(i, (j - 1))]): return _recon((i - 1), j) else: return _recon(i, (j - 1)) recon_tuple = tuple(map((lambda x: x[0]), _recon(i, j))) return recon_tuple
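A quick check of the LCS helpers, using the word sequences from the _union_lcs docstring below:

x = 'w1 w2 w3 w4 w5'.split()
y = 'w1 w3 w8 w9 w5'.split()
print(_len_lcs(x, y))    # 3
print(_recon_lcs(x, y))  # ('w1', 'w3', 'w5')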
def rouge_n(evaluated_sentences, reference_sentences, n=2): '\n Computes ROUGE-N of two text collections of sentences.\n Source: http://research.microsoft.com/en-us/um/people/cyl/download/\n papers/rouge-working-note-v1.3.1.pdf\n\n Args:\n evaluated_sentences: The sentences that have been picked by the summarizer\n reference_sentences: The sentences from the reference set\n n: Size of ngram. Defaults to 2.\n\n Returns:\n A tuple (f1, precision, recall) for ROUGE-N\n\n Raises:\n ValueError: raises exception if a param has len <= 0\n ' if ((len(evaluated_sentences) <= 0) or (len(reference_sentences) <= 0)): raise ValueError('Collections must contain at least 1 sentence.') evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences) reference_ngrams = _get_word_ngrams(n, reference_sentences) reference_count = len(reference_ngrams) evaluated_count = len(evaluated_ngrams) overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams) overlapping_count = len(overlapping_ngrams) if (evaluated_count == 0): precision = 0.0 else: precision = (overlapping_count / evaluated_count) if (reference_count == 0): recall = 0.0 else: recall = (overlapping_count / reference_count) f1_score = (2.0 * ((precision * recall) / ((precision + recall) + 1e-08))) return (f1_score, precision, recall)
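A worked example for rouge_n with unigrams: the two sentences below share four of five unigram types, so precision and recall both come out to 0.8:

hyp = ['the cat sat on the mat']
ref = ['the cat is on the mat']
f1, precision, recall = rouge_n(hyp, ref, n=1)
print(precision, recall)  # 0.8 0.8
print(round(f1, 3))       # 0.8 (up to the 1e-08 smoothing term)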
def _f_p_r_lcs(llcs, m, n): '\n Computes the LCS-based F-measure score\n Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/\n rouge-working-note-v1.3.1.pdf\n\n Args:\n llcs: Length of LCS\n m: number of words in reference summary\n n: number of words in candidate summary\n\n Returns:\n A tuple (f_lcs, p_lcs, r_lcs) of LCS-based F-measure, precision,\n and recall\n ' r_lcs = (llcs / m) p_lcs = (llcs / n) beta = (p_lcs / (r_lcs + 1e-12)) num = (((1 + (beta ** 2)) * r_lcs) * p_lcs) denom = (r_lcs + ((beta ** 2) * p_lcs)) f_lcs = (num / (denom + 1e-12)) return (f_lcs, p_lcs, r_lcs)
def rouge_l_sentence_level(evaluated_sentences, reference_sentences): '\n Computes ROUGE-L (sentence level) of two text collections of sentences.\n http://research.microsoft.com/en-us/um/people/cyl/download/papers/\n rouge-working-note-v1.3.1.pdf\n\n Calculated according to:\n R_lcs = LCS(X,Y)/m\n P_lcs = LCS(X,Y)/n\n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)\n\n where:\n X = reference summary\n Y = Candidate summary\n m = length of reference summary\n n = length of candidate summary\n\n Args:\n evaluated_sentences: The sentences that have been picked by the summarizer\n reference_sentences: The sentences from the reference set\n\n Returns:\n A tuple (f_lcs, p_lcs, r_lcs)\n\n Raises:\n ValueError: raises exception if a param has len <= 0\n ' if ((len(evaluated_sentences) <= 0) or (len(reference_sentences) <= 0)): raise ValueError('Collections must contain at least 1 sentence.') reference_words = _split_into_words(reference_sentences) evaluated_words = _split_into_words(evaluated_sentences) m = len(reference_words) n = len(evaluated_words) lcs = _len_lcs(evaluated_words, reference_words) return _f_p_r_lcs(lcs, m, n)
def _union_lcs(evaluated_sentences, reference_sentence): '\n Returns LCS_u(r_i, C) which is the LCS score of the union longest common\n subsequence between reference sentence ri and candidate summary C. For example\n if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8 and\n c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1 is\n “w1 w2” and the longest common subsequence of r_i and c2 is “w1 w3 w5”. The\n union longest common subsequence of r_i, c1, and c2 is “w1 w2 w3 w5” and\n LCS_u(r_i, C) = 4/5.\n\n Args:\n evaluated_sentences: The sentences that have been picked by the summarizer\n reference_sentence: One of the sentences in the reference summaries\n\n Returns:\n float: LCS_u(r_i, C)\n\n Raises:\n ValueError: raises exception if a param has len <= 0\n ' if (len(evaluated_sentences) <= 0): raise ValueError('Collections must contain at least 1 sentence.') lcs_union = set() reference_words = _split_into_words([reference_sentence]) combined_lcs_length = 0 for eval_s in evaluated_sentences: evaluated_words = _split_into_words([eval_s]) lcs = set(_recon_lcs(reference_words, evaluated_words)) combined_lcs_length += len(lcs) lcs_union = lcs_union.union(lcs) union_lcs_count = len(lcs_union) union_lcs_value = (union_lcs_count / combined_lcs_length) return union_lcs_value
def rouge_l_summary_level(evaluated_sentences, reference_sentences): '\n Computes ROUGE-L (summary level) of two text collections of sentences.\n http://research.microsoft.com/en-us/um/people/cyl/download/papers/\n rouge-working-note-v1.3.1.pdf\n\n Calculated according to:\n R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m\n P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n\n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)\n\n where:\n SUM(i,u) = SUM from i through u\n u = number of sentences in reference summary\n C = Candidate summary made up of v sentences\n m = number of words in reference summary\n n = number of words in candidate summary\n\n Args:\n evaluated_sentences: The sentences that have been picked by the summarizer\n reference_sentences: The sentences from the reference set\n\n Returns:\n A tuple (f_lcs, p_lcs, r_lcs)\n\n Raises:\n ValueError: raises exception if a param has len <= 0\n ' if ((len(evaluated_sentences) <= 0) or (len(reference_sentences) <= 0)): raise ValueError('Collections must contain at least 1 sentence.') m = len(_split_into_words(reference_sentences)) n = len(_split_into_words(evaluated_sentences)) union_lcs_sum_across_all_references = 0 for ref_s in reference_sentences: union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences, ref_s) return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)
def rouge(hypotheses, references): 'Calculates average rouge scores for a list of hypotheses and\n references' rouge_1 = [rouge_n([hyp], [ref], 1) for (hyp, ref) in zip(hypotheses, references)] (rouge_1_f, rouge_1_p, rouge_1_r) = map(np.mean, zip(*rouge_1)) rouge_2 = [rouge_n([hyp], [ref], 2) for (hyp, ref) in zip(hypotheses, references)] (rouge_2_f, rouge_2_p, rouge_2_r) = map(np.mean, zip(*rouge_2)) rouge_l = [rouge_l_sentence_level([hyp], [ref]) for (hyp, ref) in zip(hypotheses, references)] (rouge_l_f, rouge_l_p, rouge_l_r) = map(np.mean, zip(*rouge_l)) return {'rouge_1/f_score': rouge_1_f, 'rouge_1/r_score': rouge_1_r, 'rouge_1/p_score': rouge_1_p, 'rouge_2/f_score': rouge_2_f, 'rouge_2/r_score': rouge_2_r, 'rouge_2/p_score': rouge_2_p, 'rouge_l/f_score': rouge_l_f, 'rouge_l/r_score': rouge_l_r, 'rouge_l/p_score': rouge_l_p}
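A hedged end-to-end sketch: averaging ROUGE over aligned hypothesis/reference lists with the rouge function above (the sentences are illustrative only):

hypotheses = ['the cat sat on the mat', 'a quick brown fox']
references = ['the cat is on the mat', 'the quick brown fox']
scores = rouge(hypotheses, references)
print(sorted(scores.keys()))       # rouge_1/..., rouge_2/..., rouge_l/... f/p/r keys
print(scores['rouge_1/f_score'])   # averaged ROUGE-1 F1 over the two pairs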
class AttentionSeq2Seq(BasicSeq2Seq): 'Sequence2Sequence model with attention mechanism.\n\n Args:\n source_vocab_info: An instance of `VocabInfo`\n for the source vocabulary\n target_vocab_info: An instance of `VocabInfo`\n for the target vocabulary\n params: A dictionary of hyperparameters\n ' def __init__(self, params, mode, name='att_seq2seq'): super(AttentionSeq2Seq, self).__init__(params, mode, name) @staticmethod def default_params(): params = BasicSeq2Seq.default_params().copy() params.update({'attention.class': 'AttentionLayerBahdanau', 'attention.params': {'num_units': 150}, 'bridge.class': 'seq2seq.models.bridges.ZeroBridge', 'encoder.class': 'seq2seq.encoders.BidirectionalRNNEncoder', 'encoder.params': {'rnn_cell': {'cell_class': 'LSTMCell', 'cell_params': {'num_units': 150}, 'dropout_input_keep_prob': 0.5, 'dropout_output_keep_prob': 0.5, 'num_layers': 1}}, 'decoder.class': 'seq2seq.decoders.AttentionDecoder', 'decoder.params': {'max_decode_length': 250, 'rnn_cell': {'cell_class': 'LSTMCell', 'cell_params': {'num_units': 150}, 'dropout_input_keep_prob': 0.5, 'dropout_output_keep_prob': 0.5, 'num_layers': 1}}, 'optimizer.name': 'Adam', 'optimizer.params': {'epsilon': 8e-07}, 'optimizer.learning_rate': 0.0005, 'source.max_seq_len': 50, 'source.reverse': False, 'target.max_seq_len': 250}) return params def _create_decoder(self, encoder_output, features, _labels): attention_class = (locate(self.params['attention.class']) or getattr(decoders.attention, self.params['attention.class'])) attention_layer = attention_class(params=self.params['attention.params'], mode=self.mode) reverse_scores_lengths = None if self.params['source.reverse']: reverse_scores_lengths = features['source_len'] if self.use_beam_search: reverse_scores_lengths = tf.tile(input=reverse_scores_lengths, multiples=[self.params['inference.beam_search.beam_width']]) decoder_mask = features['decoder_mask'] return self.decoder_class(params=self.params['decoder.params'], mode=self.mode, vocab_size=self.target_vocab_info.total_size, attention_values=encoder_output.attention_values, attention_values_length=encoder_output.attention_values_length, attention_keys=encoder_output.outputs, attention_fn=attention_layer, reverse_scores_lengths=reverse_scores_lengths, decoder_mask=decoder_mask)
def _total_tensor_depth(tensor): 'Returns the size of a tensor without the first (batch) dimension' return np.prod(tensor.get_shape().as_list()[1:])
@six.add_metaclass(abc.ABCMeta) class Bridge(Configurable): 'An abstract bridge class. A bridge defines how state is passed\n between encoder and decoder.\n\n All logic is contained in the `_create` method, which returns an\n initial state for the decoder.\n\n Args:\n encoder_outputs: A namedtuple that corresponds to the encoder outputs.\n decoder_state_size: An integer or tuple of integers defining the\n state size of the decoder.\n ' def __init__(self, encoder_outputs, decoder_state_size, params, mode): Configurable.__init__(self, params, mode) self.encoder_outputs = encoder_outputs self.decoder_state_size = decoder_state_size self.batch_size = tf.shape(nest.flatten(self.encoder_outputs.final_state)[0])[0] def __call__(self): 'Runs the bridge function.\n\n Returns:\n An initial decoder_state tensor or tuple of tensors.\n ' return self._create() @abc.abstractmethod def _create(self): ' Implements the logic for this bridge.\n This function should be implemented by child classes.\n\n Returns:\n An initial_decoder_state tensor or tuple of tensors.\n ' raise NotImplementedError('Must be implemented by child class')
class ZeroBridge(Bridge): 'A bridge that does not pass any information between encoder and decoder\n and sets the initial decoder state to 0. The input function is not modified.\n ' @staticmethod def default_params(): return {} def _create(self): zero_state = nest.map_structure((lambda x: tf.zeros([self.batch_size, x], dtype=tf.float32)), self.decoder_state_size) return zero_state
class PassThroughBridge(Bridge): 'Passes the encoder state through to the decoder as-is. This bridge\n can only be used if encoder and decoder have the exact same state size, i.e.\n use the same RNN cell.\n ' @staticmethod def default_params(): return {} def _create(self): nest.assert_same_structure(self.encoder_outputs.final_state, self.decoder_state_size) return self.encoder_outputs.final_state
class InitialStateBridge(Bridge): 'A bridge that creates an initial decoder state based on the output\n of the encoder. This state is created by passing the encoder outputs\n through an additional layer to match them to the decoder state size.\n The input function remains unmodified.\n\n Args:\n encoder_outputs: A namedtuple that corresponds to the encoder outputs.\n decoder_state_size: An integer or tuple of integers defining the\n state size of the decoder.\n bridge_input: Which attribute of the `encoder_outputs` to use for the\n initial state calculation. For example, "final_state" means that\n `encoder_outputs.final_state` will be used.\n activation_fn: An optional activation function for the extra\n layer inserted between encoder and decoder. A string for a function\n name contained in `tf.nn`, e.g. "tanh".\n ' def __init__(self, encoder_outputs, decoder_state_size, params, mode): super(InitialStateBridge, self).__init__(encoder_outputs, decoder_state_size, params, mode) if (not hasattr(encoder_outputs, self.params['bridge_input'])): raise ValueError('Invalid bridge_input not in encoder outputs.') self._bridge_input = getattr(encoder_outputs, self.params['bridge_input']) self._activation_fn = locate(self.params['activation_fn']) @staticmethod def default_params(): return {'bridge_input': 'final_state', 'activation_fn': 'tensorflow.identity'} def _create(self): bridge_input = nest.map_structure((lambda x: tf.reshape(x, [self.batch_size, _total_tensor_depth(x)])), self._bridge_input) bridge_input_flat = nest.flatten([bridge_input]) bridge_input_concat = tf.concat(bridge_input_flat, 1) state_size_splits = nest.flatten(self.decoder_state_size) total_decoder_state_size = sum(state_size_splits) initial_state_flat = tf.contrib.layers.fully_connected(inputs=bridge_input_concat, num_outputs=total_decoder_state_size, activation_fn=self._activation_fn) initial_state = tf.split(initial_state_flat, state_size_splits, axis=1) return nest.pack_sequence_as(self.decoder_state_size, initial_state)
def _flatten_dict(dict_, parent_key='', sep='.'): 'Flattens a nested dictionary. Namedtuples within\n the dictionary are converted to dicts.\n\n Args:\n dict_: The dictionary to flatten.\n parent_key: A prefix to prepend to each key.\n sep: Separator between parent and child keys, a string. For example\n { "a": { "b": 3 } } will become { "a.b": 3 } if the separator is ".".\n\n Returns:\n A new flattened dictionary.\n ' items = [] for (key, value) in dict_.items(): new_key = (((parent_key + sep) + key) if parent_key else key) if isinstance(value, collections.MutableMapping): items.extend(_flatten_dict(value, new_key, sep=sep).items()) elif (isinstance(value, tuple) and hasattr(value, '_asdict')): dict_items = collections.OrderedDict(zip(value._fields, value)) items.extend(_flatten_dict(dict_items, new_key, sep=sep).items()) else: items.append((new_key, value)) return dict(items)
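A small illustration of _flatten_dict, including the namedtuple-to-dict conversion path:

import collections

Point = collections.namedtuple('Point', ['x', 'y'])
nested = {'a': {'b': 3}, 'p': Point(x=1, y=2)}
print(_flatten_dict(nested))  # {'a.b': 3, 'p.x': 1, 'p.y': 2}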
class ModelBase(Configurable): 'Abstract base class for models.\n\n Args:\n params: A dictionary of hyperparameter values\n name: A name for this model to be used as a variable scope\n ' def __init__(self, params, mode, name): self.name = name Configurable.__init__(self, params, mode) def _clip_gradients(self, grads_and_vars): 'Clips gradients by global norm.' (gradients, variables) = zip(*grads_and_vars) (clipped_gradients, _) = tf.clip_by_global_norm(gradients, self.params['optimizer.clip_gradients']) return list(zip(clipped_gradients, variables)) def _create_optimizer(self): 'Creates the optimizer' name = self.params['optimizer.name'] optimizer = tf.contrib.layers.OPTIMIZER_CLS_NAMES[name](learning_rate=self.params['optimizer.learning_rate'], **self.params['optimizer.params']) if (self.params['optimizer.sync_replicas'] > 0): optimizer = tf.train.SyncReplicasOptimizer(opt=optimizer, replicas_to_aggregate=self.params['optimizer.sync_replicas_to_aggregate'], total_num_replicas=self.params['optimizer.sync_replicas']) global_vars.SYNC_REPLICAS_OPTIMIZER = optimizer return optimizer def _build_train_op(self, loss): 'Creates the training operation' learning_rate_decay_fn = training_utils.create_learning_rate_decay_fn(decay_type=(self.params['optimizer.lr_decay_type'] or None), decay_steps=self.params['optimizer.lr_decay_steps'], decay_rate=self.params['optimizer.lr_decay_rate'], start_decay_at=self.params['optimizer.lr_start_decay_at'], stop_decay_at=self.params['optimizer.lr_stop_decay_at'], min_learning_rate=self.params['optimizer.lr_min_learning_rate'], staircase=self.params['optimizer.lr_staircase']) optimizer = self._create_optimizer() train_op = tf.contrib.layers.optimize_loss(loss=loss, global_step=tf.contrib.framework.get_global_step(), learning_rate=self.params['optimizer.learning_rate'], learning_rate_decay_fn=learning_rate_decay_fn, clip_gradients=self._clip_gradients, optimizer=optimizer, summaries=['learning_rate', 'loss', 'gradients', 'gradient_norm']) return train_op @staticmethod def default_params(): 'Returns a dictionary of default parameters for this model.' return {'optimizer.name': 'Adam', 'optimizer.learning_rate': 0.0001, 'optimizer.params': {}, 'optimizer.lr_decay_type': '', 'optimizer.lr_decay_steps': 100, 'optimizer.lr_decay_rate': 0.99, 'optimizer.lr_start_decay_at': 0, 'optimizer.lr_stop_decay_at': tf.int32.max, 'optimizer.lr_min_learning_rate': 1e-12, 'optimizer.lr_staircase': False, 'optimizer.clip_gradients': 5.0, 'optimizer.sync_replicas': 0, 'optimizer.sync_replicas_to_aggregate': 0} def batch_size(self, features, labels): 'Returns the batch size for a batch of examples' raise NotImplementedError() def __call__(self, features, labels, params): 'Creates the model graph. See the model_fn documentation in\n tf.contrib.learn.Estimator class for a more detailed explanation.\n ' with tf.variable_scope('model'): with tf.variable_scope(self.name): return self._build(features, labels, params) def _build(self, features, labels, params): 'Subclasses should implement this method. See the `model_fn` documentation\n in tf.contrib.learn.Estimator class for a more detailed explanation.\n ' raise NotImplementedError
class SchemaAttentionSeq2Seq(BasicSeq2Seq): 'Sequence2Sequence model with attention mechanism for both input sequence\n and database schema.\n\n Args:\n source_vocab_info: An instance of `VocabInfo`\n for the source vocabulary\n target_vocab_info: An instance of `VocabInfo`\n for the target vocabulary\n params: A dictionary of hyperparameters\n ' def __init__(self, params, mode, name='schema_att_seq2seq'): super(SchemaAttentionSeq2Seq, self).__init__(params, mode, name) @staticmethod def default_params(): params = BasicSeq2Seq.default_params().copy() params.update({'attention.class': 'AttentionLayerBahdanau', 'attention.params': {'num_units': 150}, 'bridge.class': 'seq2seq.models.bridges.ZeroBridge', 'encoder.class': 'seq2seq.encoders.BidirectionalRNNEncoder', 'encoder.params': {'rnn_cell': {'cell_class': 'LSTMCell', 'cell_params': {'num_units': 150}, 'dropout_input_keep_prob': 0.5, 'dropout_output_keep_prob': 0.5, 'num_layers': 1}}, 'decoder.class': 'seq2seq.decoders.SchemaAttentionDecoder', 'decoder.params': {'max_decode_length': 250, 'rnn_cell': {'cell_class': 'LSTMCell', 'cell_params': {'num_units': 150}, 'dropout_input_keep_prob': 0.5, 'dropout_output_keep_prob': 0.5, 'num_layers': 1}}, 'optimizer.name': 'Adam', 'optimizer.params': {'epsilon': 8e-07}, 'optimizer.learning_rate': 0.0005, 'schema.attention.class': 'AttentionLayerBahdanau', 'schema.attention.params': {'num_units': 150}, 'source.max_seq_len': 50, 'source.reverse': False, 'target.max_seq_len': 250}) return params def _get_tables_and_ids(self, features): schema_tables = graph_utils.get_dict_from_collection('schema_tables') schema_locs = features['schema_loc'] table = schema_tables['schema_file_lookup_table'] ids = table.lookup(schema_locs) return (schema_tables, ids) def _schema_lookups(self, features): (schema_tables, ids) = self._get_tables_and_ids(features) all_schema_embeddings = schema_tables['all_schema_embeddings'] schema_embeddings_3d = tf.squeeze(tf.gather(all_schema_embeddings, ids), [1]) schema_lengths = schema_tables['schema_lengths'] schema_attn_values_length = tf.squeeze(tf.gather(schema_lengths, ids), [1]) return (schema_embeddings_3d, schema_embeddings_3d, schema_attn_values_length) def _get_decoder_args(self, encoder_output, features, _labels): attention_class = (locate(self.params['attention.class']) or getattr(decoders.attention, self.params['attention.class'])) attention_layer = attention_class(params=self.params['attention.params'], mode=self.mode) schema_attention_class = (locate(self.params['schema.attention.class']) or getattr(decoders.attention, self.params['schema.attention.class'])) schema_attention_layer = schema_attention_class(params=self.params['schema.attention.params'], mode=self.mode) reverse_scores_lengths = None if self.params['source.reverse']: reverse_scores_lengths = features['source_len'] if self.use_beam_search: reverse_scores_lengths = tf.tile(input=reverse_scores_lengths, multiples=[self.params['inference.beam_search.beam_width']]) mode = self.mode params = self.params['decoder.params'] vocab_size = self.target_vocab_info.total_size return (params, mode, vocab_size, encoder_output.attention_values, encoder_output.attention_values_length, encoder_output.outputs, attention_layer, reverse_scores_lengths, schema_attention_layer) def _create_decoder(self, encoder_output, features, _labels): (params, mode, vocab_size, attention_values, attention_values_length, attention_keys, attention_fn, reverse_scores_lengths, schema_attention_fn) = self._get_decoder_args(encoder_output, features, _labels) (schema_attention_keys, schema_attention_values, schema_attention_values_length) = self._schema_lookups(features) return self.decoder_class(params=params, mode=mode, vocab_size=vocab_size, attention_values=attention_values, attention_values_length=attention_values_length, attention_keys=attention_keys, attention_fn=attention_fn, reverse_scores_lengths=reverse_scores_lengths, schema_attention_keys=schema_attention_keys, schema_attention_values=schema_attention_values, schema_attention_values_length=schema_attention_values_length, schema_attention_fn=schema_attention_fn)
class SchemaMapAttentionSeq2Seq(SchemaAttentionSeq2Seq): 'Seq2Seq model with attention to input, schema, and schema map.\n Args:\n source_vocab_info: An instance of `VocabInfo`\n for the source vocabulary\n target_vocab_info: An instance of `VocabInfo`\n for the target vocabulary\n params: A dictionary of hyperparameters\n ' def __init__(self, params, mode, name='schema_map_att_seq2seq'): super(SchemaMapAttentionSeq2Seq, self).__init__(params, mode, name) @staticmethod def default_params(): params = SchemaAttentionSeq2Seq.default_params().copy() params.update({'decoder.class': 'seq2seq.decoders.SchemaMapAttentionDecoder', 'schema_map.attention.class': 'AttentionLayerBahdanau', 'schema_map.attention.params': {'num_units': 150}}) return params def _schema_lookups(self, features): (schema_tables, ids) = self._get_tables_and_ids(features) all_schema_embeddings = schema_tables['all_schema_embeddings'] schema_embeddings_3d = tf.squeeze(tf.gather(all_schema_embeddings, ids), [1]) all_schema_maps = schema_tables['all_schema_maps'] schema_maps_3d = tf.squeeze(tf.gather(all_schema_maps, ids), [1]) schema_lengths = schema_tables['schema_lengths'] schema_attn_values_length = tf.squeeze(tf.gather(schema_lengths, ids), [1]) schema_map_lengths = schema_tables['schema_map_lengths'] schema_map_attn_values_length = tf.squeeze(tf.gather(schema_map_lengths, ids), [1]) return (schema_embeddings_3d, schema_embeddings_3d, schema_attn_values_length, schema_maps_3d, schema_maps_3d, schema_map_attn_values_length) def _create_decoder(self, encoder_output, features, _labels): (params, mode, vocab_size, attention_values, attention_values_length, attention_keys, attention_fn, reverse_scores_lengths, schema_attention_fn) = self._get_decoder_args(encoder_output, features, _labels) (schema_attention_keys, schema_attention_values, schema_attention_values_length, schema_map_attention_keys, schema_map_attention_values, schema_map_attention_values_length) = self._schema_lookups(features) schema_map_attention_class = (locate(self.params['schema_map.attention.class']) or getattr(decoders.attention, self.params['schema_map.attention.class'])) schema_map_attention_layer = schema_map_attention_class(params=self.params['schema_map.attention.params'], mode=self.mode) return self.decoder_class(params=params, mode=mode, vocab_size=vocab_size, attention_values=attention_values, attention_values_length=attention_values_length, attention_keys=attention_keys, attention_fn=attention_fn, reverse_scores_lengths=reverse_scores_lengths, schema_attention_keys=schema_attention_keys, schema_attention_values=schema_attention_values, schema_attention_values_length=schema_attention_values_length, schema_attention_fn=schema_attention_fn, schema_map_attention_keys=schema_map_attention_keys, schema_map_attention_values=schema_map_attention_values, schema_map_attention_values_length=schema_map_attention_values_length, schema_map_attention_fn=schema_map_attention_layer)
class DumpBeams(InferenceTask): 'Dumps beam search debugging information (predicted ids, parent ids,\n scores, and log probabilities) to a numpy file.\n\n Params:\n file: File to write beam search information to.\n ' def __init__(self, params): super(DumpBeams, self).__init__(params) self._beam_accum = {'predicted_ids': [], 'beam_parent_ids': [], 'scores': [], 'log_probs': []} if (not self.params['file']): raise ValueError('Must specify file for DumpBeams') @staticmethod def default_params(): params = {} params.update({'file': ''}) return params def before_run(self, _run_context): fetches = {} fetches['beam_search_output.predicted_ids'] = self._predictions['beam_search_output.predicted_ids'] fetches['beam_search_output.beam_parent_ids'] = self._predictions['beam_search_output.beam_parent_ids'] fetches['beam_search_output.scores'] = self._predictions['beam_search_output.scores'] fetches['beam_search_output.log_probs'] = self._predictions['beam_search_output.log_probs'] return tf.train.SessionRunArgs(fetches) def after_run(self, _run_context, run_values): fetches_batch = run_values.results for fetches in unbatch_dict(fetches_batch): self._beam_accum['predicted_ids'].append(fetches['beam_search_output.predicted_ids']) self._beam_accum['beam_parent_ids'].append(fetches['beam_search_output.beam_parent_ids']) self._beam_accum['scores'].append(fetches['beam_search_output.scores']) self._beam_accum['log_probs'].append(fetches['beam_search_output.log_probs']) def end(self, _session): np.savez(self.params['file'], **self._beam_accum)
def unbatch_dict(dict_): 'Converts a dictionary of batch items to a batch/list of\n dictionary items.\n ' batch_size = list(dict_.values())[0].shape[0] for i in range(batch_size): (yield {key: value[i] for (key, value) in dict_.items()})
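A quick illustration of unbatch_dict on numpy-batched values:

import numpy as np

batch = {'ids': np.array([[1, 2], [3, 4]]), 'score': np.array([0.5, 0.9])}
for example in unbatch_dict(batch):
    print(example)
# {'ids': array([1, 2]), 'score': 0.5}
# {'ids': array([3, 4]), 'score': 0.9}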
@six.add_metaclass(abc.ABCMeta) class InferenceTask(tf.train.SessionRunHook, Configurable): '\n Abstract base class for inference tasks. Defines the logic used to make\n predictions for a specific type of task.\n\n Params:\n model_class: The model class to instantiate. If undefined,\n re-uses the class used during training.\n model_params: Model hyperparameters. Specified hyperparameters will\n overwrite those used during training.\n\n Args:\n params: See Params above.\n ' def __init__(self, params): Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.INFER) self._predictions = None def begin(self): self._predictions = graph_utils.get_dict_from_collection('predictions') @abstractstaticmethod def default_params(): raise NotImplementedError()
class AttentionLayerTest(tf.test.TestCase): '\n Tests the AttentionLayer module.\n ' def setUp(self): super(AttentionLayerTest, self).setUp() tf.logging.set_verbosity(tf.logging.INFO) self.batch_size = 8 self.attention_dim = 128 self.input_dim = 16 self.seq_len = 10 self.state_dim = 32 def _create_layer(self): 'Creates the attention layer. Should be implemented by child classes' raise NotImplementedError def _test_layer(self): 'Tests Attention layer with a given score type' inputs_pl = tf.placeholder(tf.float32, (None, None, self.input_dim)) inputs_length_pl = tf.placeholder(tf.int32, [None]) state_pl = tf.placeholder(tf.float32, (None, self.state_dim)) attention_fn = self._create_layer() (scores, context) = attention_fn(query=state_pl, keys=inputs_pl, values=inputs_pl, values_length=inputs_length_pl) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) feed_dict = {} feed_dict[inputs_pl] = np.random.randn(self.batch_size, self.seq_len, self.input_dim) feed_dict[state_pl] = np.random.randn(self.batch_size, self.state_dim) feed_dict[inputs_length_pl] = (np.arange(self.batch_size) + 1) (scores_, context_) = sess.run([scores, context], feed_dict) np.testing.assert_array_equal(scores_.shape, [self.batch_size, self.seq_len]) np.testing.assert_array_equal(context_.shape, [self.batch_size, self.input_dim]) for (idx, batch) in enumerate(scores_, 1): np.testing.assert_array_equal(batch[idx:], np.zeros_like(batch[idx:])) scores_sum = np.sum(scores_, axis=1) np.testing.assert_array_almost_equal(scores_sum, np.ones([self.batch_size]))
class AttentionLayerDotTest(AttentionLayerTest): 'Tests the AttentionLayerDot class' def _create_layer(self): return AttentionLayerDot(params={'num_units': self.attention_dim}, mode=tf.contrib.learn.ModeKeys.TRAIN) def test_layer(self): self._test_layer()
class AttentionLayerBahdanauTest(AttentionLayerTest): 'Tests the AttentionLayerBahdanau class' def _create_layer(self): return AttentionLayerBahdanau(params={'num_units': self.attention_dim}, mode=tf.contrib.learn.ModeKeys.TRAIN) def test_layer(self): self._test_layer()
class TestGatherTree(tf.test.TestCase): 'Tests the gather_tree function' def test_gather_tree(self): predicted_ids = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) parent_ids = np.array([[0, 0, 0], [0, 1, 1], [2, 1, 2]]) expected_result = np.array([[2, 2, 2], [6, 5, 6], [7, 8, 9]]) res = beam_search.gather_tree(tf.convert_to_tensor(predicted_ids), tf.convert_to_tensor(parent_ids)) with self.test_session() as sess: res_ = sess.run(res) np.testing.assert_array_equal(expected_result, res_)
class TestLengthNorm(tf.test.TestCase): 'Tests the length normalization score' def test_length_norm(self): lengths_ = np.array([[1, 2, 3], [3, 3, 3]]) penalty_factor_ = 0.6 length_pen = beam_search.length_penalty(sequence_lengths=tf.convert_to_tensor(lengths_), penalty_factor=penalty_factor_) with self.test_session() as sess: length_pen_ = sess.run(length_pen) np.testing.assert_almost_equal(length_pen_[(0, 0)], 1.0, decimal=5) np.testing.assert_almost_equal(length_pen_[(0, 1)], 1.0969027, decimal=4) np.testing.assert_almost_equal(length_pen_[(0, 2)], 1.1884017, decimal=4)
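The expected values in TestLengthNorm are consistent with the GNMT-style length penalty lp(len) = ((5 + len) / 6) ** penalty_factor; treating that formula as an assumption about beam_search.length_penalty, a plain-numpy cross-check reproduces the constants above:

# Assumed GNMT-style penalty form; verifies the constants used in the test above.
for length in [1, 2, 3]:
    print(((5.0 + length) / 6.0) ** 0.6)
# 1.0, ~1.0969027, ~1.1884017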
class TestBeamStep(tf.test.TestCase): 'Tests a single step of beam search\n ' def setUp(self): super(TestBeamStep, self).setUp() self.state_size = 10 config = beam_search.BeamSearchConfig(beam_width=3, vocab_size=5, eos_token=0, length_penalty_weight=0.6, choose_successors_fn=beam_search.choose_top_k) self.config = config def test_step(self): beam_state = beam_search.BeamSearchState(log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)), lengths=tf.constant(2, shape=[self.config.beam_width], dtype=tf.int32), finished=tf.zeros([self.config.beam_width], dtype=tf.bool)) logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001) logits_[(0, 2)] = 1.9 logits_[(0, 3)] = 2.1 logits_[(1, 3)] = 3.1 logits_[(1, 4)] = 0.9 logits = tf.convert_to_tensor(logits_, dtype=tf.float32) log_probs = tf.nn.log_softmax(logits) (outputs, next_beam_state) = beam_search.beam_search_step(time_=2, logits=logits, beam_state=beam_state, config=self.config) with self.test_session() as sess: (outputs_, next_state_, state_, log_probs_) = sess.run([outputs, next_beam_state, beam_state, log_probs]) np.testing.assert_array_equal(outputs_.predicted_ids, [3, 3, 2]) np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 0]) np.testing.assert_array_equal(next_state_.lengths, [3, 3, 3]) np.testing.assert_array_equal(next_state_.finished, [False, False, False]) expected_log_probs = state_.log_probs[[1, 0, 0]] expected_log_probs[0] += log_probs_[(1, 3)] expected_log_probs[1] += log_probs_[(0, 3)] expected_log_probs[2] += log_probs_[(0, 2)] np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs) def test_step_with_eos(self): beam_state = beam_search.BeamSearchState(log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)), lengths=tf.convert_to_tensor([2, 1, 2], dtype=tf.int32), finished=tf.constant([False, True, False], dtype=tf.bool)) logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001) logits_[(0, 2)] = 1.1 logits_[(1, 2)] = 1.0 logits_[(2, 2)] = 1.0 logits = tf.convert_to_tensor(logits_, dtype=tf.float32) log_probs = tf.nn.log_softmax(logits) (outputs, next_beam_state) = beam_search.beam_search_step(time_=2, logits=logits, beam_state=beam_state, config=self.config) with self.test_session() as sess: (outputs_, next_state_, state_, log_probs_) = sess.run([outputs, next_beam_state, beam_state, log_probs]) np.testing.assert_array_equal(outputs_.predicted_ids, [0, 2, 2]) np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 2]) np.testing.assert_array_equal(next_state_.lengths, [1, 3, 3]) np.testing.assert_array_equal(next_state_.finished, [True, False, False]) expected_log_probs = state_.log_probs[outputs_.beam_parent_ids] expected_log_probs[1] += log_probs_[(0, 2)] expected_log_probs[2] += log_probs_[(2, 2)] np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs) def test_step_with_new_eos(self): beam_state = beam_search.BeamSearchState(log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)), lengths=tf.constant(2, shape=[self.config.beam_width], dtype=tf.int32), finished=tf.zeros([self.config.beam_width], dtype=tf.bool)) logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001) logits_[(0, 0)] = 1.9 logits_[(0, 3)] = 2.1 logits_[(1, 3)] = 3.1 logits_[(1, 4)] = 0.9 logits = tf.convert_to_tensor(logits_, dtype=tf.float32) log_probs = tf.nn.log_softmax(logits) (outputs, next_beam_state) = beam_search.beam_search_step(time_=2, logits=logits, beam_state=beam_state, config=self.config) with self.test_session() as sess: (outputs_, next_state_, state_, log_probs_) = sess.run([outputs, next_beam_state, beam_state, log_probs]) np.testing.assert_array_equal(outputs_.predicted_ids, [3, 3, 0]) np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 0]) np.testing.assert_array_equal(next_state_.lengths, [3, 3, 2]) np.testing.assert_array_equal(next_state_.finished, [False, False, True]) expected_log_probs = state_.log_probs[[1, 0, 0]] expected_log_probs[0] += log_probs_[(1, 3)] expected_log_probs[1] += log_probs_[(0, 3)] expected_log_probs[2] += log_probs_[(0, 0)] np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)
class TestEosMasking(tf.test.TestCase): 'Tests EOS masking used in beam search\n ' def test_eos_masking(self): probs = tf.constant([[(- 0.2), (- 0.2), (- 0.2), (- 0.2), (- 0.2)], [(- 0.3), (- 0.3), (- 0.3), 3, 0], [5, 6, 0, 0, 0]]) eos_token = 0 previously_finished = tf.constant([0, 1, 0], dtype=tf.float32) masked = beam_search.mask_probs(probs, eos_token, previously_finished) with self.test_session() as sess: probs = sess.run(probs) masked = sess.run(masked) np.testing.assert_array_equal(probs[0], masked[0]) np.testing.assert_array_equal(probs[2], masked[2]) np.testing.assert_equal(masked[1][0], 0) np.testing.assert_approx_equal(masked[1][1], np.finfo('float32').min) np.testing.assert_approx_equal(masked[1][2], np.finfo('float32').min) np.testing.assert_approx_equal(masked[1][3], np.finfo('float32').min) np.testing.assert_approx_equal(masked[1][4], np.finfo('float32').min)
class BridgeTest(tf.test.TestCase): 'Abstract class for bridge tests' def setUp(self): super(BridgeTest, self).setUp() self.batch_size = 4 self.encoder_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.GRUCell(4), tf.contrib.rnn.GRUCell(8)]) self.decoder_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.LSTMCell(16), tf.contrib.rnn.GRUCell(8)]) final_encoder_state = nest.map_structure((lambda x: tf.convert_to_tensor(value=np.random.randn(self.batch_size, x), dtype=tf.float32)), self.encoder_cell.state_size) self.encoder_outputs = EncoderOutput(outputs=tf.convert_to_tensor(value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32), attention_values=tf.convert_to_tensor(value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32), attention_values_length=np.full([self.batch_size], 10), final_state=final_encoder_state) def _create_bridge(self): 'Creates the bridge class to be tested. Must be implemented by\n child classes' raise NotImplementedError() def _assert_correct_outputs(self): 'Asserts bridge outputs are correct. Must be implemented by\n child classes' raise NotImplementedError() def _run(self, scope=None, **kwargs): 'Runs the bridge with the given arguments\n ' with tf.variable_scope((scope or 'bridge')): bridge = self._create_bridge(**kwargs) initial_state = bridge() with self.test_session() as sess: sess.run(tf.global_variables_initializer()) initial_state_ = sess.run(initial_state) return initial_state_
class TestZeroBridge(BridgeTest): 'Tests for the ZeroBridge class' def _create_bridge(self, **kwargs): return ZeroBridge(encoder_outputs=self.encoder_outputs, decoder_state_size=self.decoder_cell.state_size, params=kwargs, mode=tf.contrib.learn.ModeKeys.TRAIN) def _assert_correct_outputs(self, initial_state_): initial_state_flat_ = nest.flatten(initial_state_) for element in initial_state_flat_: np.testing.assert_array_equal(element, np.zeros_like(element)) def test_zero_bridge(self): self._assert_correct_outputs(self._run())
class TestPassThroughBridge(BridgeTest): 'Tests for the PassThroughBridge class' def _create_bridge(self, **kwargs): return PassThroughBridge(encoder_outputs=self.encoder_outputs, decoder_state_size=self.decoder_cell.state_size, params=kwargs, mode=tf.contrib.learn.ModeKeys.TRAIN) def _assert_correct_outputs(self, initial_state_): nest.assert_same_structure(initial_state_, self.decoder_cell.state_size) nest.assert_same_structure(initial_state_, self.encoder_outputs.final_state) encoder_state_flat = nest.flatten(self.encoder_outputs.final_state) with self.test_session() as sess: encoder_state_flat_ = sess.run(encoder_state_flat) initial_state_flat_ = nest.flatten(initial_state_) for (e_dec, e_enc) in zip(initial_state_flat_, encoder_state_flat_): np.testing.assert_array_equal(e_dec, e_enc) def test_passthrough_bridge(self): self.decoder_cell = self.encoder_cell self._assert_correct_outputs(self._run())
class TestInitialStateBridge(BridgeTest): 'Tests for the InitialStateBridge class' def _create_bridge(self, **kwargs): return InitialStateBridge(encoder_outputs=self.encoder_outputs, decoder_state_size=self.decoder_cell.state_size, params=kwargs, mode=tf.contrib.learn.ModeKeys.TRAIN) def _assert_correct_outputs(self, initial_state_): nest.assert_same_structure(initial_state_, self.decoder_cell.state_size) def test_with_final_state(self): self._assert_correct_outputs(self._run(bridge_input='final_state')) def test_with_outputs(self): self._assert_correct_outputs(self._run(bridge_input='outputs')) def test_with_activation_fn(self): self._assert_correct_outputs(self._run(bridge_input='final_state', activation_fn='tanh'))
class ConvEncoderTest(tf.test.TestCase): '\n Tests the ConvEncoder class.\n ' def setUp(self): super(ConvEncoderTest, self).setUp() self.batch_size = 4 self.sequence_length = 16 self.input_depth = 10 self.mode = tf.contrib.learn.ModeKeys.TRAIN def _test_with_params(self, params): 'Tests the encoder with a given parameter configuration' inputs = tf.random_normal([self.batch_size, self.sequence_length, self.input_depth]) example_length = (tf.ones(self.batch_size, dtype=tf.int32) * self.sequence_length) encode_fn = ConvEncoder(params, self.mode) encoder_output = encode_fn(inputs, example_length) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) encoder_output_ = sess.run(encoder_output) att_value_units = encode_fn.params['attention_cnn.units'] output_units = encode_fn.params['output_cnn.units'] np.testing.assert_array_equal(encoder_output_.outputs.shape, [self.batch_size, self.sequence_length, att_value_units]) np.testing.assert_array_equal(encoder_output_.attention_values.shape, [self.batch_size, self.sequence_length, output_units]) np.testing.assert_array_equal(encoder_output_.final_state.shape, [self.batch_size, output_units]) def test_encode_with_pos(self): self._test_with_params({'position_embeddings.enable': True, 'position_embeddings.num_positions': self.sequence_length, 'attention_cnn.units': 5, 'output_cnn.units': 6})
class SplitTokensDecoderTest(tf.test.TestCase): 'Tests the SplitTokensDecoder class\n ' def test_decode(self): decoder = split_tokens_decoder.SplitTokensDecoder(delimiter=' ', tokens_feature_name='source_tokens', length_feature_name='source_len') self.assertEqual(decoder.list_items(), ['source_tokens', 'source_len']) data = tf.constant('Hello world ! 笑w') decoded_tokens = decoder.decode(data, ['source_tokens']) decoded_length = decoder.decode(data, ['source_len']) decoded_both = decoder.decode(data, decoder.list_items()) with self.test_session() as sess: decoded_tokens_ = sess.run(decoded_tokens)[0] decoded_length_ = sess.run(decoded_length)[0] decoded_both_ = sess.run(decoded_both) self.assertEqual(decoded_length_, 4) np.testing.assert_array_equal(np.char.decode(decoded_tokens_.astype('S'), 'utf-8'), ['Hello', 'world', '!', '笑w']) self.assertEqual(decoded_both_[1], 4) np.testing.assert_array_equal(np.char.decode(decoded_both_[0].astype('S'), 'utf-8'), ['Hello', 'world', '!', '笑w'])
class ParallelDataProviderTest(tf.test.TestCase): 'Tests the ParallelDataProvider class\n ' def setUp(self): super(ParallelDataProviderTest, self).setUp() self.source_lines = ['Hello', 'World', '!', '笑'] self.target_lines = ['1', '2', '3', '笑'] self.source_to_target = dict(zip(self.source_lines, self.target_lines)) self.source_file = tempfile.NamedTemporaryFile() self.target_file = tempfile.NamedTemporaryFile() self.source_file.write('\n'.join(self.source_lines).encode('utf-8')) self.source_file.flush() self.target_file.write('\n'.join(self.target_lines).encode('utf-8')) self.target_file.flush() def tearDown(self): super(ParallelDataProviderTest, self).tearDown() self.source_file.close() self.target_file.close() def test_reading(self): num_epochs = 50 data_provider = make_parallel_data_provider(data_sources_source=[self.source_file.name], data_sources_target=[self.target_file.name], num_epochs=num_epochs, shuffle=True) item_keys = list(data_provider.list_items()) item_values = data_provider.get(item_keys) items_dict = dict(zip(item_keys, item_values)) self.assertEqual(set(item_keys), set(['source_tokens', 'source_len', 'target_tokens', 'target_len'])) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) with tf.contrib.slim.queues.QueueRunners(sess): item_dicts_ = [sess.run(items_dict) for _ in range((num_epochs * 3))] for item_dict in item_dicts_: item_dict['target_tokens'] = np.char.decode(item_dict['target_tokens'].astype('S'), 'utf-8') item_dict['source_tokens'] = np.char.decode(item_dict['source_tokens'].astype('S'), 'utf-8') self.assertEqual(item_dict['source_len'], 2) self.assertEqual(item_dict['source_tokens'][(- 1)], 'SEQUENCE_END') self.assertEqual(item_dict['target_len'], 3) self.assertEqual(item_dict['target_tokens'][0], 'SEQUENCE_START') self.assertEqual(item_dict['target_tokens'][(- 1)], 'SEQUENCE_END') source_joined = ' '.join(item_dict['source_tokens'][:(- 1)]) expected_target = self.source_to_target[source_joined] np.testing.assert_array_equal(item_dict['target_tokens'], ((['SEQUENCE_START'] + expected_target.split(' ')) + ['SEQUENCE_END'])) def test_reading_without_targets(self): num_epochs = 50 data_provider = make_parallel_data_provider(data_sources_source=[self.source_file.name], data_sources_target=None, num_epochs=num_epochs, shuffle=True) item_keys = list(data_provider.list_items()) item_values = data_provider.get(item_keys) items_dict = dict(zip(item_keys, item_values)) self.assertEqual(set(item_keys), set(['source_tokens', 'source_len'])) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) with tf.contrib.slim.queues.QueueRunners(sess): item_dicts_ = [sess.run(items_dict) for _ in range((num_epochs * 3))] for item_dict in item_dicts_: self.assertEqual(item_dict['source_len'], 2) item_dict['source_tokens'] = np.char.decode(item_dict['source_tokens'].astype('S'), 'utf-8') self.assertEqual(item_dict['source_tokens'][(- 1)], 'SEQUENCE_END')
class DecoderTests(object): '\n A collection of decoder tests. This class should be inherited together with\n `tf.test.TestCase`.\n ' def __init__(self): self.batch_size = 4 self.sequence_length = 16 self.input_depth = 10 self.vocab_size = 100 self.max_decode_length = 20 def create_decoder(self, helper, mode): 'Creates the decoder module.\n\n This must be implemented by child classes and instantiate the appropriate\n decoder to be tested.\n ' raise NotImplementedError def test_with_fixed_inputs(self): inputs = tf.random_normal([self.batch_size, self.sequence_length, self.input_depth]) seq_length = (tf.ones(self.batch_size, dtype=tf.int32) * self.sequence_length) helper = decode_helper.TrainingHelper(inputs=inputs, sequence_length=seq_length) decoder_fn = self.create_decoder(helper=helper, mode=tf.contrib.learn.ModeKeys.TRAIN) initial_state = decoder_fn.cell.zero_state(self.batch_size, dtype=tf.float32) (decoder_output, _) = decoder_fn(initial_state, helper) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) decoder_output_ = sess.run(decoder_output) np.testing.assert_array_equal(decoder_output_.logits.shape, [self.sequence_length, self.batch_size, self.vocab_size]) np.testing.assert_array_equal(decoder_output_.predicted_ids.shape, [self.sequence_length, self.batch_size]) return decoder_output_ def test_gradients(self): inputs = tf.random_normal([self.batch_size, self.sequence_length, self.input_depth]) seq_length = (tf.ones(self.batch_size, dtype=tf.int32) * self.sequence_length) labels = np.random.randint(0, self.vocab_size, [self.batch_size, self.sequence_length]) helper = decode_helper.TrainingHelper(inputs=inputs, sequence_length=seq_length) decoder_fn = self.create_decoder(helper=helper, mode=tf.contrib.learn.ModeKeys.TRAIN) initial_state = decoder_fn.cell.zero_state(self.batch_size, dtype=tf.float32) (decoder_output, _) = decoder_fn(initial_state, helper) losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=decoder_output.logits, labels=labels) optimizer = tf.train.AdamOptimizer(learning_rate=0.001) grads_and_vars = optimizer.compute_gradients(tf.reduce_mean(losses)) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) grads_and_vars_ = sess.run(grads_and_vars) for (grad, _) in grads_and_vars_: self.assertFalse(np.isnan(grad).any()) return grads_and_vars_ def test_with_dynamic_inputs(self): embeddings = tf.get_variable('W_embed', [self.vocab_size, self.input_depth]) helper = decode_helper.GreedyEmbeddingHelper(embedding=embeddings, start_tokens=([0] * self.batch_size), end_token=(- 1)) decoder_fn = self.create_decoder(helper=helper, mode=tf.contrib.learn.ModeKeys.INFER) initial_state = decoder_fn.cell.zero_state(self.batch_size, dtype=tf.float32) (decoder_output, _) = decoder_fn(initial_state, helper) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) decoder_output_ = sess.run(decoder_output) np.testing.assert_array_equal(decoder_output_.logits.shape, [self.max_decode_length, self.batch_size, self.vocab_size]) np.testing.assert_array_equal(decoder_output_.predicted_ids.shape, [self.max_decode_length, self.batch_size]) def test_with_beam_search(self): self.batch_size = 1 config = beam_search.BeamSearchConfig(beam_width=10, vocab_size=self.vocab_size, eos_token=(self.vocab_size - 2), length_penalty_weight=0.6, choose_successors_fn=beam_search.choose_top_k) embeddings = tf.get_variable('W_embed', [self.vocab_size, self.input_depth]) helper = decode_helper.GreedyEmbeddingHelper(embedding=embeddings, start_tokens=([0] * config.beam_width), end_token=(- 1)) decoder_fn = self.create_decoder(helper=helper, mode=tf.contrib.learn.ModeKeys.INFER) decoder_fn = beam_search_decoder.BeamSearchDecoder(decoder=decoder_fn, config=config) initial_state = decoder_fn.cell.zero_state(self.batch_size, dtype=tf.float32) (decoder_output, _) = decoder_fn(initial_state, helper) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) decoder_output_ = sess.run(decoder_output) np.testing.assert_array_equal(decoder_output_.predicted_ids.shape, [self.max_decode_length, 1, config.beam_width]) np.testing.assert_array_equal(decoder_output_.beam_search_output.beam_parent_ids.shape, [self.max_decode_length, 1, config.beam_width]) np.testing.assert_array_equal(decoder_output_.beam_search_output.scores.shape, [self.max_decode_length, 1, config.beam_width]) np.testing.assert_array_equal(decoder_output_.beam_search_output.original_outputs.predicted_ids.shape, [self.max_decode_length, 1, config.beam_width]) np.testing.assert_array_equal(decoder_output_.beam_search_output.original_outputs.logits.shape, [self.max_decode_length, 1, config.beam_width, self.vocab_size]) return decoder_output
class BasicDecoderTest(tf.test.TestCase, DecoderTests): 'Tests the `BasicDecoder` class.\n ' def setUp(self): tf.test.TestCase.setUp(self) tf.logging.set_verbosity(tf.logging.INFO) DecoderTests.__init__(self) def create_decoder(self, helper, mode): params = BasicDecoder.default_params() params['max_decode_length'] = self.max_decode_length decoder = BasicDecoder(params=params, mode=mode, vocab_size=self.vocab_size) return decoder
class AttentionDecoderTest(tf.test.TestCase, DecoderTests): 'Tests the `AttentionDecoder` class.\n ' def setUp(self): tf.test.TestCase.setUp(self) tf.logging.set_verbosity(tf.logging.INFO) DecoderTests.__init__(self) self.attention_dim = 64 self.input_seq_len = 10 def create_decoder(self, helper, mode): attention_fn = AttentionLayerDot(params={'num_units': self.attention_dim}, mode=tf.contrib.learn.ModeKeys.TRAIN) attention_values = tf.convert_to_tensor(np.random.randn(self.batch_size, self.input_seq_len, 32), dtype=tf.float32) attention_keys = tf.convert_to_tensor(np.random.randn(self.batch_size, self.input_seq_len, 32), dtype=tf.float32) params = AttentionDecoder.default_params() params['max_decode_length'] = self.max_decode_length return AttentionDecoder(params=params, mode=mode, vocab_size=self.vocab_size, attention_keys=attention_keys, attention_values=attention_values, attention_values_length=(np.arange(self.batch_size) + 1), attention_fn=attention_fn) def test_attention_scores(self): decoder_output_ = self.test_with_fixed_inputs() np.testing.assert_array_equal(decoder_output_.attention_scores.shape, [self.sequence_length, self.batch_size, self.input_seq_len]) scores_sum = np.sum(decoder_output_.attention_scores, axis=2) np.testing.assert_array_almost_equal(scores_sum, np.ones([self.sequence_length, self.batch_size]))
def _load_model_from_config(config_path, hparam_overrides, vocab_file, mode):
  """Loads a model from a configuration file."""
  with gfile.GFile(config_path) as config_file:
    config = yaml.load(config_file)
  model_cls = locate(config["model"]) or getattr(models, config["model"])
  model_params = config["model_params"]
  if hparam_overrides:
    model_params.update(hparam_overrides)
  # Keep decoding short so that tests run quickly.
  model_params["decoder.params"]["max_decode_length"] = 5
  model_params["vocab_source"] = vocab_file
  model_params["vocab_target"] = vocab_file
  return model_cls(params=model_params, mode=mode)
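# For reference, a minimal sketch of the YAML layout `_load_model_from_config`
# expects, inferred only from the keys read above (`model` and `model_params`).
# The model name and parameter values below are illustrative, not copied from
# any shipped example config:
#
#   model: AttentionSeq2Seq
#   model_params:
#     decoder.params:
#       max_decode_length: 100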
class ExampleConfigTest(object):
  """Interface for configuration-based tests."""

  def __init__(self, *args, **kwargs):
    super(ExampleConfigTest, self).__init__(*args, **kwargs)
    self.vocab_file = None

  def _config_path(self):
    """Returns the path to the configuration to be tested."""
    raise NotImplementedError()

  def create_model(self, mode, params=None):
    """Creates the model."""
    return _load_model_from_config(
        config_path=self._config_path(),
        hparam_overrides=params,
        vocab_file=self.vocab_file.name,
        mode=mode)
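# Note: `create_model` assumes `self.vocab_file` has been replaced with a real
# file object before it is called, e.g. by a `setUp` in a sibling mixin such
# as `EncoderDecoderTests`. A hypothetical sketch of such a setup:
#
#   def setUp(self):
#     super(ExampleConfigTest, self).setUp()
#     self.vocab_file = tempfile.NamedTemporaryFile()
#     self.vocab_file.write("hello\nworld\n".encode("utf-8"))
#     self.vocab_file.flush()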
class TestNMTLarge(ExampleConfigTest, EncoderDecoderTests):
  """Tests nmt_large.yml."""

  def _config_path(self):
    return os.path.join(EXAMPLE_CONFIG_DIR, "nmt_large.yml")


class TestNMTMedium(ExampleConfigTest, EncoderDecoderTests):
  """Tests nmt_medium.yml."""

  def _config_path(self):
    return os.path.join(EXAMPLE_CONFIG_DIR, "nmt_medium.yml")


class TestNMTSmall(ExampleConfigTest, EncoderDecoderTests):
  """Tests nmt_small.yml."""

  def _config_path(self):
    return os.path.join(EXAMPLE_CONFIG_DIR, "nmt_small.yml")
class TestNMTConv(ExampleConfigTest, EncoderDecoderTests):
  """Tests nmt_conv.yml."""

  def _config_path(self):
    return os.path.join(EXAMPLE_CONFIG_DIR, "nmt_conv.yml")
class TestPrintModelAnalysisHook(tf.test.TestCase):
  """Tests the `PrintModelAnalysisHook` hook."""

  def test_begin(self):
    model_dir = tempfile.mkdtemp()
    outfile = tempfile.NamedTemporaryFile()
    tf.get_variable("weights", [128, 128])
    hook = hooks.PrintModelAnalysisHook(
        params={}, model_dir=model_dir,
        run_config=tf.contrib.learn.RunConfig())
    hook.begin()

    # The hook should have written a tfprof parameter summary for the
    # variable created above.
    with gfile.GFile(os.path.join(model_dir, "model_analysis.txt")) as file:
      file_contents = file.read().strip()

    self.assertEqual(
        file_contents.decode(),
        "_TFProfRoot (--/16.38k params)\n"
        " weights (128x128, 16.38k/16.38k params)")
    outfile.close()
class TestTrainSampleHook(tf.test.TestCase):
  """Tests the `TrainSampleHook` class.
  """

  def setUp(self):
    super(TestTrainSampleHook, self).setUp()
    self.model_dir = tempfile.mkdtemp()
    self.sample_dir = os.path.join(self.model_dir, "samples")

    # The hook expects these predictions to be present in the graph
    # collection.
    pred_dict = {}
    pred_dict["predicted_tokens"] = tf.constant([["Hello", "World", "笑w"]])
    pred_dict["labels.target_tokens"] = tf.constant([["Hello", "World", "笑w"]])
    pred_dict["labels.target_len"] = tf.constant(2)
    graph_utils.add_dict_to_collection(pred_dict, "predictions")

  def tearDown(self):
    super(TestTrainSampleHook, self).tearDown()
    shutil.rmtree(self.model_dir)

  def test_sampling(self):
    hook = hooks.TrainSampleHook(
        params={"every_n_steps": 10}, model_dir=self.model_dir,
        run_config=tf.contrib.learn.RunConfig())

    global_step = tf.contrib.framework.get_or_create_global_step()
    no_op = tf.no_op()
    hook.begin()

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      sess.run(tf.tables_initializer())

      mon_sess = monitored_session._HookedSession(sess, [hook])

      # Should trigger for step 0.
      sess.run(tf.assign(global_step, 0))
      mon_sess.run(no_op)
      outfile = os.path.join(self.sample_dir, "samples_000000.txt")
      with open(outfile, "rb") as readfile:
        self.assertIn("Prediction followed by Target @ Step 0",
                      readfile.read().decode("utf-8"))

      # Should not trigger for step 9.
      sess.run(tf.assign(global_step, 9))
      mon_sess.run(no_op)
      outfile = os.path.join(self.sample_dir, "samples_000009.txt")
      self.assertFalse(os.path.exists(outfile))

      # Should trigger again for step 10.
      sess.run(tf.assign(global_step, 10))
      mon_sess.run(no_op)
      outfile = os.path.join(self.sample_dir, "samples_000010.txt")
      with open(outfile, "rb") as readfile:
        self.assertIn("Prediction followed by Target @ Step 10",
                      readfile.read().decode("utf-8"))
class TestMetadataCaptureHook(tf.test.TestCase):
  """Tests the `MetadataCaptureHook`."""

  def setUp(self):
    super(TestMetadataCaptureHook, self).setUp()
    self.model_dir = tempfile.mkdtemp()

  def tearDown(self):
    super(TestMetadataCaptureHook, self).tearDown()
    shutil.rmtree(self.model_dir)

  def test_capture(self):
    global_step = tf.contrib.framework.get_or_create_global_step()
    # Some test computation to profile.
    some_weights = tf.get_variable("weights", [2, 128])
    computation = tf.nn.softmax(some_weights)

    hook = hooks.MetadataCaptureHook(
        params={"step": 5}, model_dir=self.model_dir,
        run_config=tf.contrib.learn.RunConfig())
    hook.begin()

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])

      # Should not trigger before step 5.
      sess.run(tf.assign(global_step, 0))
      mon_sess.run(computation)
      self.assertEqual(gfile.ListDirectory(self.model_dir), [])

      # Step 5 itself only requests the metadata; the output files appear
      # after the following run.
      sess.run(tf.assign(global_step, 5))
      mon_sess.run(computation)
      self.assertEqual(gfile.ListDirectory(self.model_dir), [])

      mon_sess.run(computation)
      self.assertEqual(
          set(gfile.ListDirectory(self.model_dir)),
          set(["run_meta", "tfprof_log", "timeline.json"]))
class TestInputPipelineDef(tf.test.TestCase):
  """Tests InputPipeline string definitions."""

  def test_without_extra_args(self):
    pipeline_def = yaml.load("""
        class: ParallelTextInputPipeline
        params:
          source_files: ["file1"]
          target_files: ["file2"]
          num_epochs: 1
          shuffle: True
        """)
    pipeline = input_pipeline.make_input_pipeline_from_def(
        pipeline_def, tf.contrib.learn.ModeKeys.TRAIN)
    self.assertIsInstance(pipeline, input_pipeline.ParallelTextInputPipeline)
    self.assertEqual(pipeline.params["source_files"], ["file1"])
    self.assertEqual(pipeline.params["target_files"], ["file2"])
    self.assertEqual(pipeline.params["num_epochs"], 1)
    self.assertEqual(pipeline.params["shuffle"], True)

  def test_with_extra_args(self):
    pipeline_def = yaml.load("""
        class: ParallelTextInputPipeline
        params:
          source_files: ["file1"]
          target_files: ["file2"]
          num_epochs: 1
          shuffle: True
        """)
    # Extra keyword arguments override values from the YAML definition.
    pipeline = input_pipeline.make_input_pipeline_from_def(
        def_dict=pipeline_def,
        mode=tf.contrib.learn.ModeKeys.TRAIN,
        num_epochs=5,
        shuffle=False)
    self.assertIsInstance(pipeline, input_pipeline.ParallelTextInputPipeline)
    self.assertEqual(pipeline.params["source_files"], ["file1"])
    self.assertEqual(pipeline.params["target_files"], ["file2"])
    self.assertEqual(pipeline.params["num_epochs"], 5)
    self.assertEqual(pipeline.params["shuffle"], False)
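# For illustration: the YAML strings above are only a convenience. Since
# `yaml.load` produces a plain dict, an equivalent definition can be passed
# to `make_input_pipeline_from_def` directly (a sketch; the file names are
# placeholders, as in the tests):
#
#   pipeline = input_pipeline.make_input_pipeline_from_def(
#       def_dict={
#           "class": "ParallelTextInputPipeline",
#           "params": {
#               "source_files": ["file1"],
#               "target_files": ["file2"],
#               "num_epochs": 1,
#               "shuffle": True,
#           },
#       },
#       mode=tf.contrib.learn.ModeKeys.TRAIN)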
class TFRecordsInputPipelineTest(tf.test.TestCase):
  """Tests data provider operations for the TFRecord input pipeline.
  """

  def setUp(self):
    super(TFRecordsInputPipelineTest, self).setUp()
    tf.logging.set_verbosity(tf.logging.INFO)

  def test_pipeline(self):
    tfrecords_file = test_utils.create_temp_tfrecords(
        sources=["Hello World . 笑"], targets=["Bye 泣"])

    pipeline = input_pipeline.TFRecordInputPipeline(
        params={
            "files": [tfrecords_file.name],
            "source_field": "source",
            "target_field": "target",
            "num_epochs": 5,
            "shuffle": False
        },
        mode=tf.contrib.learn.ModeKeys.TRAIN)

    data_provider = pipeline.make_data_provider()
    features = pipeline.read_from_data_provider(data_provider)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      with tf.contrib.slim.queues.QueueRunners(sess):
        res = sess.run(features)

    # Lengths include the special SEQUENCE_END / SEQUENCE_START tokens.
    self.assertEqual(res["source_len"], 5)
    self.assertEqual(res["target_len"], 4)

    np.testing.assert_array_equal(
        np.char.decode(res["source_tokens"].astype("S"), "utf-8"),
        ["Hello", "World", ".", "笑", "SEQUENCE_END"])
    np.testing.assert_array_equal(
        np.char.decode(res["target_tokens"].astype("S"), "utf-8"),
        ["SEQUENCE_START", "Bye", "泣", "SEQUENCE_END"])
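# A sketch of how records consumed by `TFRecordInputPipeline` could be
# produced: one serialized `tf.train.Example` per (source, target) pair, with
# feature keys matching the `source_field`/`target_field` params above. This
# helper is illustrative, not the actual `test_utils.create_temp_tfrecords`
# implementation.
def _write_tfrecord_example(writer, source, target):
  """Writes one (source, target) sentence pair to an open TFRecordWriter."""
  example = tf.train.Example(features=tf.train.Features(feature={
      "source": tf.train.Feature(
          bytes_list=tf.train.BytesList(value=[source.encode("utf-8")])),
      "target": tf.train.Feature(
          bytes_list=tf.train.BytesList(value=[target.encode("utf-8")])),
  }))
  writer.write(example.SerializeToString())
# Usage sketch:
#   writer = tf.python_io.TFRecordWriter("/tmp/data.tfrecords")
#   _write_tfrecord_example(writer, "Hello World . 笑", "Bye 泣")
#   writer.close()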
class ParallelTextInputPipelineTest(tf.test.TestCase):
  """Tests data provider operations for the parallel text input pipeline.
  """

  def setUp(self):
    super(ParallelTextInputPipelineTest, self).setUp()
    tf.logging.set_verbosity(tf.logging.INFO)

  def test_pipeline(self):
    file_source, file_target = test_utils.create_temp_parallel_data(
        sources=["Hello World . 笑"], targets=["Bye 泣"])

    pipeline = input_pipeline.ParallelTextInputPipeline(
        params={
            "source_files": [file_source.name],
            "target_files": [file_target.name],
            "num_epochs": 5,
            "shuffle": False
        },
        mode=tf.contrib.learn.ModeKeys.TRAIN)

    data_provider = pipeline.make_data_provider()
    features = pipeline.read_from_data_provider(data_provider)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      with tf.contrib.slim.queues.QueueRunners(sess):
        res = sess.run(features)

    # The source gets SEQUENCE_END appended; the target is wrapped in
    # SEQUENCE_START / SEQUENCE_END.
    self.assertEqual(res["source_len"], 5)
    self.assertEqual(res["target_len"], 4)

    np.testing.assert_array_equal(
        np.char.decode(res["source_tokens"].astype("S"), "utf-8"),
        ["Hello", "World", ".", "笑", "SEQUENCE_END"])
    np.testing.assert_array_equal(
        np.char.decode(res["target_tokens"].astype("S"), "utf-8"),
        ["SEQUENCE_START", "Bye", "泣", "SEQUENCE_END"])