class TestMosesBleu(tf.test.TestCase):
  """Tests using the Moses multi-bleu script to calculate BLEU score."""

  def _test_multi_bleu(self, hypotheses, references, lowercase, expected_bleu):
    """Runs a multi-bleu test."""
    result = bleu.moses_multi_bleu(
        hypotheses=hypotheses,
        references=references,
        lowercase=lowercase)
    np.testing.assert_almost_equal(result, expected_bleu, decimal=2)

  def test_multi_bleu(self):
    self._test_multi_bleu(
        hypotheses=np.array([
            'The brown fox jumps over the dog 笑',
            'The brown fox jumps over the dog 2 笑'
        ]),
        references=np.array([
            'The quick brown fox jumps over the lazy dog 笑',
            'The quick brown fox jumps over the lazy dog 笑'
        ]),
        lowercase=False,
        expected_bleu=46.51)

  def test_empty(self):
    self._test_multi_bleu(
        hypotheses=np.array([]),
        references=np.array([]),
        lowercase=False,
        expected_bleu=0.0)

  def test_multi_bleu_lowercase(self):
    self._test_multi_bleu(
        hypotheses=np.array([
            'The brown fox jumps over The Dog 笑',
            'The brown fox jumps over The Dog 2 笑'
        ]),
        references=np.array([
            'The quick brown fox jumps over the lazy dog 笑',
            'The quick brown fox jumps over the lazy dog 笑'
        ]),
        lowercase=True,
        expected_bleu=46.51)
class TestTextMetricSpec(tf.test.TestCase):
  """Abstract class for testing TextMetricSpecs
  based on hypotheses and references
  """

  def _test_metric_spec(self, metric_spec, hyps, refs, expected_scores):
    """Tests a MetricSpec"""
    predictions = {'predicted_tokens': tf.placeholder(dtype=tf.string)}
    labels = {'target_tokens': tf.placeholder(dtype=tf.string)}
    value, update_op = metric_spec.create_metric_ops(None, labels, predictions)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      scores = []
      for hyp, ref in zip(hyps, refs):
        hyp = hyp.split(' ')
        ref = ref.split(' ')
        sess.run(update_op, {
            predictions['predicted_tokens']: [hyp],
            labels['target_tokens']: [ref]
        })
        scores.append(sess.run(value))
      for score, expected in zip(scores, expected_scores):
        np.testing.assert_almost_equal(score, expected, decimal=2)
class TestBleuMetricSpec(TestTextMetricSpec):
  """Tests the `BleuMetricSpec`"""

  def test_bleu(self):
    metric_spec = BleuMetricSpec({})
    return self._test_metric_spec(
        metric_spec=metric_spec,
        hyps=['A B C D E F', 'A B C D E F'],
        refs=['A B C D E F', 'A B A D E F'],
        expected_scores=[100.0, 69.19])
class TestRougeMetricSpec(TestTextMetricSpec):
  """Tests the `RougeMetricSpec`"""

  def test_rouge_1_f_score(self):
    metric_spec = RougeMetricSpec({'rouge_type': 'rouge_1/f_score'})
    self._test_metric_spec(
        metric_spec=metric_spec,
        hyps=['A B C D E F', 'A B C D E F'],
        refs=['A B C D E F', 'A B A D E F'],
        expected_scores=[1.0, 0.954])
    self._test_metric_spec(
        metric_spec=metric_spec, hyps=[], refs=[], expected_scores=[0.0])
    self._test_metric_spec(
        metric_spec=metric_spec, hyps=['A'], refs=['B'], expected_scores=[0.0])

  def test_rouge_2_f_score(self):
    metric_spec = RougeMetricSpec({'rouge_type': 'rouge_2/f_score'})
    self._test_metric_spec(
        metric_spec=metric_spec,
        hyps=['A B C D E F', 'A B C D E F'],
        refs=['A B C D E F', 'A B A D E F'],
        expected_scores=[1.0, 0.8])
    self._test_metric_spec(
        metric_spec=metric_spec, hyps=[], refs=[], expected_scores=[0.0])
    self._test_metric_spec(
        metric_spec=metric_spec, hyps=['A'], refs=['B'], expected_scores=[0.0])

  def test_rouge_l_f_score(self):
    metric_spec = RougeMetricSpec({'rouge_type': 'rouge_l/f_score'})
    self._test_metric_spec(
        metric_spec=metric_spec,
        hyps=['A B C D E F', 'A B C D E F'],
        refs=['A B C D E F', 'A B A D E F'],
        expected_scores=[1.0, 0.916])
    self._test_metric_spec(
        metric_spec=metric_spec, hyps=[], refs=[], expected_scores=[0.0])
    self._test_metric_spec(
        metric_spec=metric_spec, hyps=['A'], refs=['B'], expected_scores=[0.0])
class TestRougeMetric(tf.test.TestCase):
  """Tests the RougeMetric"""

  def test_rouge(self):
    hypotheses = np.array([
        'The brown fox jumps over the dog 笑',
        'The brown fox jumps over the dog 2 笑'
    ])
    references = np.array([
        'The quick brown fox jumps over the lazy dog 笑',
        'The quick brown fox jumps over the lazy dog 笑'
    ])
    output = rouge.rouge(hypotheses, references)
    np.testing.assert_almost_equal(output['rouge_1/f_score'], 0.865, decimal=2)
    np.testing.assert_almost_equal(output['rouge_2/f_score'], 0.548, decimal=2)
    np.testing.assert_almost_equal(output['rouge_l/f_score'], 0.852, decimal=2)
class EncoderDecoderTests(tf.test.TestCase): 'Base class for EncoderDecoder tests. Tests for specific classes should\n inherit from this and tf.test.TestCase.\n ' def setUp(self): super(EncoderDecoderTests, self).setUp() tf.logging.set_verbosity(tf.logging.INFO) self.batch_size = 2 self.input_depth = 4 self.sequence_length = 10 self.vocab_list = [str(_) for _ in range(10)] self.vocab_list += ['笑う', '泣く', '了解', 'はい', '^_^'] self.vocab_size = len(self.vocab_list) self.vocab_file = test_utils.create_temporary_vocab_file(self.vocab_list) self.vocab_info = vocab.get_vocab_info(self.vocab_file.name) tf.contrib.framework.get_or_create_global_step() def tearDown(self): self.vocab_file.close() def create_model(self, _mode, _params=None): 'Creates model class to be tested. Subclasses must implement this method.\n ' self.skipTest('Base module should not be tested.') def _create_example(self): 'Creates example data for a test' source = np.random.randn(self.batch_size, self.sequence_length, self.input_depth) source_len = np.random.randint(0, self.sequence_length, [self.batch_size]) target_len = np.random.randint(0, (self.sequence_length * 2), [self.batch_size]) target = np.random.randn(self.batch_size, np.max(target_len), self.input_depth) labels = np.random.randint(0, self.vocab_size, [self.batch_size, (np.max(target_len) - 1)]) example_ = namedtuple('Example', ['source', 'source_len', 'target', 'target_len', 'labels']) return example_(source, source_len, target, target_len, labels) def _test_pipeline(self, mode, params=None): 'Helper function to test the full model pipeline.\n ' source_len = (self.sequence_length + 5) target_len = (self.sequence_length + 10) source = ' '.join(np.random.choice(self.vocab_list, source_len)) target = ' '.join(np.random.choice(self.vocab_list, target_len)) (sources_file, targets_file) = test_utils.create_temp_parallel_data(sources=[source], targets=[target]) model = self.create_model(mode, params) input_pipeline_ = input_pipeline.ParallelTextInputPipeline(params={'source_files': [sources_file.name], 'target_files': [targets_file.name]}, mode=mode) input_fn = training_utils.create_input_fn(pipeline=input_pipeline_, batch_size=self.batch_size) (features, labels) = input_fn() fetches = model(features, labels, None) fetches = [_ for _ in fetches if (_ is not None)] with self.test_session() as sess: sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) sess.run(tf.tables_initializer()) with tf.contrib.slim.queues.QueueRunners(sess): fetches_ = sess.run(fetches) sources_file.close() targets_file.close() return (model, fetches_) def test_train(self): (model, fetches_) = self._test_pipeline(tf.contrib.learn.ModeKeys.TRAIN) (predictions_, loss_, _) = fetches_ target_len = ((self.sequence_length + 10) + 2) max_decode_length = model.params['target.max_seq_len'] expected_decode_len = np.minimum(target_len, max_decode_length) np.testing.assert_array_equal(predictions_['logits'].shape, [self.batch_size, (expected_decode_len - 1), model.target_vocab_info.total_size]) np.testing.assert_array_equal(predictions_['losses'].shape, [self.batch_size, (expected_decode_len - 1)]) np.testing.assert_array_equal(predictions_['predicted_ids'].shape, [self.batch_size, (expected_decode_len - 1)]) self.assertFalse(np.isnan(loss_)) def test_infer(self): (model, fetches_) = self._test_pipeline(tf.contrib.learn.ModeKeys.INFER) (predictions_,) = fetches_ pred_len = predictions_['predicted_ids'].shape[1] np.testing.assert_array_equal(predictions_['logits'].shape, 
[self.batch_size, pred_len, model.target_vocab_info.total_size]) np.testing.assert_array_equal(predictions_['predicted_ids'].shape, [self.batch_size, pred_len]) def test_infer_beam_search(self): self.batch_size = 1 beam_width = 10 (model, fetches_) = self._test_pipeline(mode=tf.contrib.learn.ModeKeys.INFER, params={'inference.beam_search.beam_width': 10}) (predictions_,) = fetches_ pred_len = predictions_['predicted_ids'].shape[1] vocab_size = model.target_vocab_info.total_size np.testing.assert_array_equal(predictions_['predicted_ids'].shape, [1, pred_len, beam_width]) np.testing.assert_array_equal(predictions_['beam_search_output.beam_parent_ids'].shape, [1, pred_len, beam_width]) np.testing.assert_array_equal(predictions_['beam_search_output.scores'].shape, [1, pred_len, beam_width]) np.testing.assert_array_equal(predictions_['beam_search_output.original_outputs.predicted_ids'].shape, [1, pred_len, beam_width]) np.testing.assert_array_equal(predictions_['beam_search_output.original_outputs.logits'].shape, [1, pred_len, beam_width, vocab_size])
class TestBasicSeq2Seq(EncoderDecoderTests):
  """Tests the seq2seq.models.BasicSeq2Seq model."""

  def setUp(self):
    super(TestBasicSeq2Seq, self).setUp()

  def create_model(self, mode, params=None):
    params_ = BasicSeq2Seq.default_params().copy()
    params_.update(TEST_PARAMS)
    params_.update({
        'vocab_source': self.vocab_file.name,
        'vocab_target': self.vocab_file.name,
        'bridge.class': 'PassThroughBridge'
    })
    params_.update(params or {})
    return BasicSeq2Seq(params=params_, mode=mode)
class TestAttentionSeq2Seq(EncoderDecoderTests):
  """Tests the seq2seq.models.AttentionSeq2Seq model."""

  def setUp(self):
    super(TestAttentionSeq2Seq, self).setUp()
    self.encoder_rnn_cell = tf.contrib.rnn.LSTMCell(32)
    self.decoder_rnn_cell = tf.contrib.rnn.LSTMCell(32)
    self.attention_dim = 128

  def create_model(self, mode, params=None):
    params_ = AttentionSeq2Seq.default_params().copy()
    params_.update(TEST_PARAMS)
    params_.update({
        'source.reverse': True,
        'vocab_source': self.vocab_file.name,
        'vocab_target': self.vocab_file.name
    })
    params_.update(params or {})
    return AttentionSeq2Seq(params=params_, mode=mode)
def _clear_flags():
  """Resets TensorFlow's FLAG values."""
  tf.app.flags.FLAGS = tf.app.flags._FlagValues()
  tf.app.flags._global_parser = argparse.ArgumentParser()
class PipelineTest(tf.test.TestCase): 'Tests training and inference scripts.\n ' def setUp(self): super(PipelineTest, self).setUp() self.output_dir = tempfile.mkdtemp() self.bin_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../bin')) tf.contrib.framework.get_or_create_global_step() def tearDown(self): shutil.rmtree(self.output_dir, ignore_errors=True) super(PipelineTest, self).tearDown() def test_train_infer(self): 'Tests training and inference scripts.\n ' (sources_train, targets_train) = test_utils.create_temp_parallel_data(sources=['a a a a', 'b b b b', 'c c c c', '笑 笑 笑 笑'], targets=['b b b b', 'a a a a', 'c c c c', '泣 泣 泣 泣']) (sources_dev, targets_dev) = test_utils.create_temp_parallel_data(sources=['a a', 'b b', 'c c c', '笑 笑 笑'], targets=['b b', 'a a', 'c c c', '泣 泣 泣']) vocab_source = test_utils.create_temporary_vocab_file(['a', 'b', 'c', '笑']) vocab_target = test_utils.create_temporary_vocab_file(['a', 'b', 'c', '泣']) _clear_flags() tf.reset_default_graph() train_script = imp.load_source('seq2seq.test.train_bin', os.path.join(BIN_FOLDER, 'train.py')) tf.app.flags.FLAGS.output_dir = self.output_dir tf.app.flags.FLAGS.hooks = '\n - class: PrintModelAnalysisHook\n - class: MetadataCaptureHook\n - class: TrainSampleHook\n ' tf.app.flags.FLAGS.metrics = '\n - class: LogPerplexityMetricSpec\n - class: BleuMetricSpec\n - class: RougeMetricSpec\n params:\n rouge_type: rouge_1/f_score\n ' tf.app.flags.FLAGS.model = 'AttentionSeq2Seq' tf.app.flags.FLAGS.model_params = '\n attention.params:\n num_units: 10\n vocab_source: {}\n vocab_target: {}\n '.format(vocab_source.name, vocab_target.name) tf.app.flags.FLAGS.batch_size = 2 config_path = os.path.join(self.output_dir, 'train_config.yml') with gfile.GFile(config_path, 'w') as config_file: yaml.dump({'input_pipeline_train': {'class': 'ParallelTextInputPipeline', 'params': {'source_files': [sources_train.name], 'target_files': [targets_train.name]}}, 'input_pipeline_dev': {'class': 'ParallelTextInputPipeline', 'params': {'source_files': [sources_dev.name], 'target_files': [targets_dev.name]}}, 'train_steps': 50, 'model_params': {'embedding.dim': 10, 'decoder.params': {'rnn_cell': {'cell_class': 'GRUCell', 'cell_params': {'num_units': 8}}}, 'encoder.params': {'rnn_cell': {'cell_class': 'GRUCell', 'cell_params': {'num_units': 8}}}}}, config_file) tf.app.flags.FLAGS.config_paths = config_path tf.logging.set_verbosity(tf.logging.INFO) train_script.main([]) expected_checkpoint = os.path.join(self.output_dir, 'model.ckpt-50.data-00000-of-00001') self.assertTrue(os.path.exists(expected_checkpoint)) _clear_flags() tf.reset_default_graph() infer_script = imp.load_source('seq2seq.test.infer_bin', os.path.join(BIN_FOLDER, 'infer.py')) attention_dir = os.path.join(self.output_dir, 'att') tf.app.flags.FLAGS.model_dir = self.output_dir tf.app.flags.FLAGS.input_pipeline = '\n class: ParallelTextInputPipeline\n params:\n source_files:\n - {}\n target_files:\n - {}\n '.format(sources_dev.name, targets_dev.name) tf.app.flags.FLAGS.batch_size = 2 tf.app.flags.FLAGS.checkpoint_path = os.path.join(self.output_dir, 'model.ckpt-50') tf.app.flags.FLAGS.tasks = '\n - class: DecodeText\n - class: DumpAttention\n params:\n output_dir: {}\n '.format(attention_dir) infer_script.main([]) self.assertTrue(os.path.exists(os.path.join(attention_dir, 'attention_scores.npz'))) self.assertTrue(os.path.exists(os.path.join(attention_dir, '00002.png'))) scores = np.load(os.path.join(attention_dir, 'attention_scores.npz')) self.assertIn('arr_0', scores) 
self.assertEqual(scores['arr_0'].shape[1], 3) self.assertIn('arr_1', scores) self.assertEqual(scores['arr_1'].shape[1], 3) self.assertIn('arr_2', scores) self.assertEqual(scores['arr_2'].shape[1], 4) self.assertIn('arr_3', scores) self.assertEqual(scores['arr_3'].shape[1], 4) _clear_flags() tf.reset_default_graph() infer_script = imp.load_source('seq2seq.test.infer_bin', os.path.join(BIN_FOLDER, 'infer.py')) tf.app.flags.FLAGS.model_dir = self.output_dir tf.app.flags.FLAGS.input_pipeline = '\n class: ParallelTextInputPipeline\n params:\n source_files:\n - {}\n target_files:\n - {}\n '.format(sources_dev.name, targets_dev.name) tf.app.flags.FLAGS.batch_size = 2 tf.app.flags.FLAGS.checkpoint_path = os.path.join(self.output_dir, 'model.ckpt-50') tf.app.flags.FLAGS.model_params = '\n inference.beam_search.beam_width: 5\n ' tf.app.flags.FLAGS.tasks = '\n - class: DecodeText\n params:\n postproc_fn: seq2seq.data.postproc.decode_sentencepiece\n - class: DumpBeams\n params:\n file: {}\n '.format(os.path.join(self.output_dir, 'beams.npz')) infer_script.main([]) self.assertTrue(os.path.exists(os.path.join(self.output_dir, 'beams.npz')))
class PoolingEncoderTest(tf.test.TestCase): '\n Tests the PoolingEncoder class.\n ' def setUp(self): super(PoolingEncoderTest, self).setUp() self.batch_size = 4 self.sequence_length = 16 self.input_depth = 10 self.mode = tf.contrib.learn.ModeKeys.TRAIN def _test_with_params(self, params): 'Tests the encoder with a given parameter configuration' inputs = tf.random_normal([self.batch_size, self.sequence_length, self.input_depth]) example_length = (tf.ones(self.batch_size, dtype=tf.int32) * self.sequence_length) encode_fn = PoolingEncoder(params, self.mode) encoder_output = encode_fn(inputs, example_length) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) encoder_output_ = sess.run(encoder_output) np.testing.assert_array_equal(encoder_output_.outputs.shape, [self.batch_size, self.sequence_length, self.input_depth]) np.testing.assert_array_equal(encoder_output_.attention_values.shape, [self.batch_size, self.sequence_length, self.input_depth]) np.testing.assert_array_equal(encoder_output_.final_state.shape, [self.batch_size, self.input_depth]) def test_encode_with_pos(self): self._test_with_params({'position_embeddings.enable': True, 'position_embeddings.num_positions': self.sequence_length}) def test_encode_without_pos(self): self._test_with_params({'position_embeddings.enable': False, 'position_embeddings.num_positions': 0})
class ExtendedMultiRNNCellTest(tf.test.TestCase): 'Tests the ExtendedMultiRNNCell' def test_without_residuals(self): inputs = tf.constant(np.random.randn(1, 2)) state = (tf.constant(np.random.randn(1, 2)), tf.constant(np.random.randn(1, 2))) with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)): standard_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.GRUCell(2) for _ in range(2)], state_is_tuple=True) res_standard = standard_cell(inputs, state, scope='standard') test_cell = rnn_cell.ExtendedMultiRNNCell([tf.contrib.rnn.GRUCell(2) for _ in range(2)]) res_test = test_cell(inputs, state, scope='test') with self.test_session() as sess: sess.run([tf.global_variables_initializer()]) (res_standard_, res_test_) = sess.run([res_standard, res_test]) self.assertAllClose(res_standard_[0], res_test_[0]) self.assertAllClose(res_standard_[1][0], res_test_[1][0]) self.assertAllClose(res_standard_[1][1], res_test_[1][1]) def _test_with_residuals(self, inputs, **kwargs): 'Runs the cell in a session' inputs = tf.convert_to_tensor(inputs) state = (tf.constant(np.random.randn(1, 2)), tf.constant(np.random.randn(1, 2))) with tf.variable_scope('root', initializer=tf.constant_initializer(0.5)): test_cell = rnn_cell.ExtendedMultiRNNCell([tf.contrib.rnn.GRUCell(2) for _ in range(2)], residual_connections=True, **kwargs) res_test = test_cell(inputs, state, scope='test') with self.test_session() as sess: sess.run([tf.global_variables_initializer()]) return sess.run(res_test) def _test_constant_shape(self, combiner): "Tests a residual combiner whose shape doesn't change\n with depth" inputs = np.random.randn(1, 2) with tf.variable_scope('same_input_size'): res_ = self._test_with_residuals(inputs, residual_combiner=combiner) self.assertEqual(res_[0].shape, (1, 2)) self.assertEqual(res_[1][0].shape, (1, 2)) self.assertEqual(res_[1][1].shape, (1, 2)) inputs = np.random.randn(1, 5) with tf.variable_scope('diff_input_size'): res_ = self._test_with_residuals(inputs, residual_combiner=combiner) self.assertEqual(res_[0].shape, (1, 2)) self.assertEqual(res_[1][0].shape, (1, 2)) self.assertEqual(res_[1][1].shape, (1, 2)) with tf.variable_scope('same_input_size_dense'): res_ = self._test_with_residuals(inputs, residual_combiner=combiner, residual_dense=True) self.assertEqual(res_[0].shape, (1, 2)) self.assertEqual(res_[1][0].shape, (1, 2)) self.assertEqual(res_[1][1].shape, (1, 2)) inputs = np.random.randn(1, 5) with tf.variable_scope('diff_input_size_dense'): res_ = self._test_with_residuals(inputs, residual_combiner=combiner, residual_dense=True) self.assertEqual(res_[0].shape, (1, 2)) self.assertEqual(res_[1][0].shape, (1, 2)) self.assertEqual(res_[1][1].shape, (1, 2)) def test_residuals_mean(self): self._test_constant_shape(combiner='mean') def test_residuals_add(self): self._test_constant_shape(combiner='add') def test_residuals_concat(self): inputs = np.random.randn(1, 2) with tf.variable_scope('same_input_size'): res_ = self._test_with_residuals(inputs, residual_combiner='concat') self.assertEqual(res_[0].shape, (1, 6)) self.assertEqual(res_[1][0].shape, (1, 2)) self.assertEqual(res_[1][1].shape, (1, 2)) inputs = np.random.randn(1, 5) with tf.variable_scope('diff_input_size'): res_ = self._test_with_residuals(inputs, residual_combiner='concat') self.assertEqual(res_[0].shape, (1, ((5 + 2) + 2))) self.assertEqual(res_[1][0].shape, (1, 2)) self.assertEqual(res_[1][1].shape, (1, 2)) inputs = np.random.randn(1, 2) with tf.variable_scope('same_input_size_dense'): res_ = self._test_with_residuals(inputs, 
residual_combiner='concat', residual_dense=True) self.assertEqual(res_[0].shape, (1, ((2 + 4) + 2))) self.assertEqual(res_[1][0].shape, (1, 2)) self.assertEqual(res_[1][1].shape, (1, 2)) inputs = np.random.randn(1, 5) with tf.variable_scope('diff_input_size_dense'): res_ = self._test_with_residuals(inputs, residual_combiner='concat', residual_dense=True) self.assertEqual(res_[0].shape, (1, ((2 + (5 + 2)) + 5))) self.assertEqual(res_[1][0].shape, (1, 2)) self.assertEqual(res_[1][1].shape, (1, 2))
class UnidirectionalRNNEncoderTest(tf.test.TestCase): '\n Tests the UnidirectionalRNNEncoder class.\n ' def setUp(self): super(UnidirectionalRNNEncoderTest, self).setUp() tf.logging.set_verbosity(tf.logging.INFO) self.batch_size = 4 self.sequence_length = 16 self.input_depth = 10 self.mode = tf.contrib.learn.ModeKeys.TRAIN self.params = rnn_encoder.UnidirectionalRNNEncoder.default_params() self.params['rnn_cell']['cell_params']['num_units'] = 32 self.params['rnn_cell']['cell_class'] = 'BasicLSTMCell' def test_encode(self): inputs = tf.random_normal([self.batch_size, self.sequence_length, self.input_depth]) example_length = (tf.ones(self.batch_size, dtype=tf.int32) * self.sequence_length) encode_fn = rnn_encoder.UnidirectionalRNNEncoder(self.params, self.mode) encoder_output = encode_fn(inputs, example_length) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) encoder_output_ = sess.run(encoder_output) np.testing.assert_array_equal(encoder_output_.outputs.shape, [self.batch_size, self.sequence_length, 32]) self.assertIsInstance(encoder_output_.final_state, tf.contrib.rnn.LSTMStateTuple) np.testing.assert_array_equal(encoder_output_.final_state.h.shape, [self.batch_size, 32]) np.testing.assert_array_equal(encoder_output_.final_state.c.shape, [self.batch_size, 32])
class BidirectionalRNNEncoderTest(tf.test.TestCase): '\n Tests the BidirectionalRNNEncoder class.\n ' def setUp(self): super(BidirectionalRNNEncoderTest, self).setUp() tf.logging.set_verbosity(tf.logging.INFO) self.batch_size = 4 self.sequence_length = 16 self.input_depth = 10 self.params = rnn_encoder.BidirectionalRNNEncoder.default_params() self.params['rnn_cell']['cell_params']['num_units'] = 32 self.params['rnn_cell']['cell_class'] = 'BasicLSTMCell' self.mode = tf.contrib.learn.ModeKeys.TRAIN def test_encode(self): inputs = tf.random_normal([self.batch_size, self.sequence_length, self.input_depth]) example_length = (tf.ones(self.batch_size, dtype=tf.int32) * self.sequence_length) encode_fn = rnn_encoder.BidirectionalRNNEncoder(self.params, self.mode) encoder_output = encode_fn(inputs, example_length) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) encoder_output_ = sess.run(encoder_output) np.testing.assert_array_equal(encoder_output_.outputs.shape, [self.batch_size, self.sequence_length, (32 * 2)]) self.assertIsInstance(encoder_output_.final_state[0], tf.contrib.rnn.LSTMStateTuple) self.assertIsInstance(encoder_output_.final_state[1], tf.contrib.rnn.LSTMStateTuple) np.testing.assert_array_equal(encoder_output_.final_state[0].h.shape, [self.batch_size, 32]) np.testing.assert_array_equal(encoder_output_.final_state[0].c.shape, [self.batch_size, 32]) np.testing.assert_array_equal(encoder_output_.final_state[1].h.shape, [self.batch_size, 32]) np.testing.assert_array_equal(encoder_output_.final_state[1].c.shape, [self.batch_size, 32])
class StackBidirectionalRNNEncoderTest(tf.test.TestCase): '\n Tests the StackBidirectionalRNNEncoder class.\n ' def setUp(self): super(StackBidirectionalRNNEncoderTest, self).setUp() tf.logging.set_verbosity(tf.logging.INFO) self.batch_size = 4 self.sequence_length = 16 self.input_depth = 10 self.mode = tf.contrib.learn.ModeKeys.TRAIN def _test_encode_with_params(self, params): 'Tests the StackBidirectionalRNNEncoder with a specific cell' inputs = tf.random_normal([self.batch_size, self.sequence_length, self.input_depth]) example_length = (tf.ones(self.batch_size, dtype=tf.int32) * self.sequence_length) encode_fn = rnn_encoder.StackBidirectionalRNNEncoder(params, self.mode) encoder_output = encode_fn(inputs, example_length) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) encoder_output_ = sess.run(encoder_output) output_size = encode_fn.params['rnn_cell']['cell_params']['num_units'] np.testing.assert_array_equal(encoder_output_.outputs.shape, [self.batch_size, self.sequence_length, (output_size * 2)]) return encoder_output_ def test_encode_with_single_cell(self): encoder_output_ = self._test_encode_with_params({'rnn_cell': {'num_layers': 1, 'cell_params': {'num_units': 32}}}) self.assertIsInstance(encoder_output_.final_state[0][0], tf.contrib.rnn.LSTMStateTuple) self.assertIsInstance(encoder_output_.final_state[1][0], tf.contrib.rnn.LSTMStateTuple) np.testing.assert_array_equal(encoder_output_.final_state[0][0].h.shape, [self.batch_size, 32]) np.testing.assert_array_equal(encoder_output_.final_state[0][0].c.shape, [self.batch_size, 32]) np.testing.assert_array_equal(encoder_output_.final_state[1][0].h.shape, [self.batch_size, 32]) np.testing.assert_array_equal(encoder_output_.final_state[1][0].c.shape, [self.batch_size, 32]) def test_encode_with_multi_cell(self): encoder_output_ = self._test_encode_with_params({'rnn_cell': {'num_layers': 4, 'cell_params': {'num_units': 32}}}) for layer_idx in range(4): self.assertIsInstance(encoder_output_.final_state[0][layer_idx], tf.contrib.rnn.LSTMStateTuple) self.assertIsInstance(encoder_output_.final_state[1][layer_idx], tf.contrib.rnn.LSTMStateTuple) np.testing.assert_array_equal(encoder_output_.final_state[0][layer_idx].h.shape, [self.batch_size, 32]) np.testing.assert_array_equal(encoder_output_.final_state[0][layer_idx].c.shape, [self.batch_size, 32]) np.testing.assert_array_equal(encoder_output_.final_state[1][layer_idx].h.shape, [self.batch_size, 32]) np.testing.assert_array_equal(encoder_output_.final_state[1][layer_idx].c.shape, [self.batch_size, 32])
class TestGetRNNCell(tf.test.TestCase): 'Tests the get_rnn_cell function.\n ' def test_single_layer(self): cell = training_utils.get_rnn_cell(cell_class='BasicLSTMCell', cell_params={'num_units': 16}, num_layers=1) self.assertIsInstance(cell, tf.contrib.rnn.BasicLSTMCell) self.assertEqual(cell.output_size, 16) def test_multi_layer(self): cell = training_utils.get_rnn_cell(cell_class='BasicLSTMCell', cell_params={'num_units': 16}, num_layers=2) self.assertIsInstance(cell, rnn_cell.ExtendedMultiRNNCell) self.assertEqual(cell.output_size, 16) def test_full_class_path(self): cell = training_utils.get_rnn_cell(cell_class='tensorflow.contrib.rnn.BasicRNNCell', cell_params={'num_units': 16}, num_layers=1) self.assertIsInstance(cell, tf.contrib.rnn.BasicRNNCell) self.assertEqual(cell.output_size, 16) def test_dropout(self): cell = training_utils.get_rnn_cell(cell_class='BasicLSTMCell', cell_params={'num_units': 16}, num_layers=1, dropout_input_keep_prob=0.5) self.assertIsInstance(cell, tf.contrib.rnn.DropoutWrapper) self.assertEqual(cell.output_size, 16) def test_extra_args(self): with self.assertRaises(ValueError): training_utils.get_rnn_cell(cell_class='LSTMCell', cell_params={'num_units': 16, 'use_peepholesERROR': True}, num_layers=1) cell = training_utils.get_rnn_cell(cell_class='LSTMCell', cell_params={'num_units': 8, 'use_peepholes': True, 'forget_bias': 0.5}, num_layers=1) self.assertIsInstance(cell, tf.contrib.rnn.LSTMCell) self.assertEqual(cell._use_peepholes, True) self.assertEqual(cell._forget_bias, 0.5) self.assertEqual(cell.output_size, 8)
class TestTrainOptions(tf.test.TestCase):
  """Tests reading and writing of training options"""

  def setUp(self):
    super(TestTrainOptions, self).setUp()
    self.model_dir = tempfile.mkdtemp()
    self.model_params = {'num_layers': 4}
    self.model_class = 'AttentionSeq2Seq'

  def test_read_write(self):
    saved_opts = training_utils.TrainOptions(
        model_class=self.model_class,
        model_params=self.model_params)
    saved_opts.dump(self.model_dir)
    loaded_opt = training_utils.TrainOptions.load(model_dir=self.model_dir)
    self.assertEqual(saved_opts.model_params, loaded_opt.model_params)
    self.assertEqual(saved_opts.model_class, loaded_opt.model_class)
class TestInputFn(tf.test.TestCase):
  """Tests create_input_fn"""

  def _test_with_args(self, **kwargs):
    """Helper function to test create_input_fn with keyword arguments"""
    sources_file, targets_file = test_utils.create_temp_parallel_data(
        sources=['Hello World .'], targets=['Goodbye .'])
    pipeline = input_pipeline.ParallelTextInputPipeline(
        params={
            'source_files': [sources_file.name],
            'target_files': [targets_file.name]
        },
        mode=tf.contrib.learn.ModeKeys.TRAIN)
    input_fn = training_utils.create_input_fn(pipeline=pipeline, **kwargs)
    features, labels = input_fn()
    with self.test_session() as sess:
      with tf.contrib.slim.queues.QueueRunners(sess):
        features_, labels_ = sess.run([features, labels])
    self.assertEqual(set(features_.keys()),
                     set(['source_tokens', 'source_len']))
    self.assertEqual(set(labels_.keys()),
                     set(['target_tokens', 'target_len']))

  def test_without_buckets(self):
    self._test_with_args(batch_size=10)

  def test_with_buckets(self):
    self._test_with_args(batch_size=10, bucket_boundaries=[0, 5, 10])
class TestLRDecay(tf.test.TestCase):
  """Tests learning rate decay function."""

  def test_no_decay(self):
    decay_fn = training_utils.create_learning_rate_decay_fn(
        decay_type=None, decay_steps=5, decay_rate=2.0)
    self.assertEqual(decay_fn, None)
    decay_fn = training_utils.create_learning_rate_decay_fn(
        decay_type='', decay_steps=5, decay_rate=2.0)
    self.assertEqual(decay_fn, None)

  def test_decay_without_min(self):
    decay_fn = training_utils.create_learning_rate_decay_fn(
        decay_type='exponential_decay',
        decay_steps=10,
        decay_rate=0.9,
        start_decay_at=100,
        stop_decay_at=1000,
        staircase=False)
    initial_lr = 1.0
    with self.test_session() as sess:
      # No decay before start_decay_at.
      np.testing.assert_equal(sess.run(decay_fn(initial_lr, 50)), initial_lr)
      # Decay applies between start_decay_at and stop_decay_at.
      np.testing.assert_almost_equal(
          sess.run(decay_fn(initial_lr, 115)),
          initial_lr * 0.9**(15.0 / 10.0))
      # Decay is frozen after stop_decay_at.
      np.testing.assert_almost_equal(
          sess.run(decay_fn(initial_lr, 5000)),
          initial_lr * 0.9**((1000.0 - 100.0) / 10.0))

  def test_decay_with_min(self):
    decay_fn = training_utils.create_learning_rate_decay_fn(
        decay_type='exponential_decay',
        decay_steps=10,
        decay_rate=0.9,
        start_decay_at=100,
        stop_decay_at=1000.0,
        min_learning_rate=0.01,
        staircase=False)
    initial_lr = 1.0
    with self.test_session() as sess:
      np.testing.assert_almost_equal(sess.run(decay_fn(initial_lr, 900)), 0.01)
def create_temp_parallel_data(sources, targets):
  """
  Creates a pair of temporary parallel text files.

  Args:
    sources: List of source sentences
    targets: List of target sentences

  Returns:
    A tuple (sources_file, targets_file).
  """
  file_source = tempfile.NamedTemporaryFile()
  file_target = tempfile.NamedTemporaryFile()
  file_source.write('\n'.join(sources).encode('utf-8'))
  file_source.flush()
  file_target.write('\n'.join(targets).encode('utf-8'))
  file_target.flush()
  return (file_source, file_target)
def create_temp_tfrecords(sources, targets):
  """
  Creates a temporary TFRecords file.

  Args:
    sources: List of source sentences
    targets: List of target sentences

  Returns:
    A temporary file object containing the serialized records.
  """
  output_file = tempfile.NamedTemporaryFile()
  writer = tf.python_io.TFRecordWriter(output_file.name)
  for source, target in zip(sources, targets):
    ex = tf.train.Example()
    ex.features.feature['source'].bytes_list.value.extend(
        [source.encode('utf-8')])
    ex.features.feature['target'].bytes_list.value.extend(
        [target.encode('utf-8')])
    writer.write(ex.SerializeToString())
  writer.close()
  return output_file
def create_temporary_vocab_file(words, counts=None):
  """
  Creates a temporary vocabulary file.

  Args:
    words: List of words in the vocabulary
    counts: Optional list of counts, one per word. If given, each line
      is written as "word<TAB>count".

  Returns:
    A temporary file object with one word per line
  """
  vocab_file = tempfile.NamedTemporaryFile()
  if counts is None:
    for token in words:
      vocab_file.write((token + '\n').encode('utf-8'))
  else:
    for token, count in zip(words, counts):
      vocab_file.write('{}\t{}\n'.format(token, count).encode('utf-8'))
  vocab_file.flush()
  return vocab_file
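# A small usage sketch (not part of any test above) showing how these helpers
# combine when setting up a quick experiment; the sentences and vocabulary
# below are made up for illustration.
sources_file, targets_file = create_temp_parallel_data(
    sources=['a b c', 'c b a'],
    targets=['x y z', 'z y x'])
vocab_file = create_temporary_vocab_file(['a', 'b', 'c', 'x', 'y', 'z'])
print(sources_file.name, targets_file.name, vocab_file.name)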
class VocabInfoTest(tf.test.TestCase):
  """Tests VocabInfo class"""

  def setUp(self):
    super(VocabInfoTest, self).setUp()
    tf.logging.set_verbosity(tf.logging.INFO)
    self.vocab_list = ['Hello', '.', 'Bye']
    self.vocab_file = test_utils.create_temporary_vocab_file(self.vocab_list)

  def tearDown(self):
    super(VocabInfoTest, self).tearDown()
    self.vocab_file.close()

  def test_vocab_info(self):
    vocab_info = vocab.get_vocab_info(self.vocab_file.name)
    self.assertEqual(vocab_info.vocab_size, 3)
    self.assertEqual(vocab_info.path, self.vocab_file.name)
    self.assertEqual(vocab_info.special_vocab.UNK, 3)
    self.assertEqual(vocab_info.special_vocab.SEQUENCE_START, 4)
    self.assertEqual(vocab_info.special_vocab.SEQUENCE_END, 5)
    self.assertEqual(vocab_info.total_size, 6)
class CreateVocabularyLookupTableTest(tf.test.TestCase): '\n Tests Vocabulary lookup table operations.\n ' def test_without_counts(self): vocab_list = ['Hello', '.', '笑'] vocab_file = test_utils.create_temporary_vocab_file(vocab_list) (vocab_to_id_table, id_to_vocab_table, _, vocab_size) = vocab.create_vocabulary_lookup_table(vocab_file.name) self.assertEqual(vocab_size, 6) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) sess.run(tf.tables_initializer()) ids = vocab_to_id_table.lookup(tf.convert_to_tensor(['Hello', '.', '笑', '??', 'xxx'])) ids = sess.run(ids) np.testing.assert_array_equal(ids, [0, 1, 2, 3, 3]) words = id_to_vocab_table.lookup(tf.convert_to_tensor([0, 1, 2, 3], dtype=tf.int64)) words = sess.run(words) np.testing.assert_array_equal(np.char.decode(words.astype('S'), 'utf-8'), ['Hello', '.', '笑', 'UNK']) def test_with_counts(self): vocab_list = ['Hello', '.', '笑'] vocab_counts = [100, 200, 300] vocab_file = test_utils.create_temporary_vocab_file(vocab_list, vocab_counts) (vocab_to_id_table, id_to_vocab_table, word_to_count_table, vocab_size) = vocab.create_vocabulary_lookup_table(vocab_file.name) self.assertEqual(vocab_size, 6) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) sess.run(tf.tables_initializer()) ids = vocab_to_id_table.lookup(tf.convert_to_tensor(['Hello', '.', '笑', '??', 'xxx'])) ids = sess.run(ids) np.testing.assert_array_equal(ids, [0, 1, 2, 3, 3]) words = id_to_vocab_table.lookup(tf.convert_to_tensor([0, 1, 2, 3], dtype=tf.int64)) words = sess.run(words) np.testing.assert_array_equal(np.char.decode(words.astype('S'), 'utf-8'), ['Hello', '.', '笑', 'UNK']) counts = word_to_count_table.lookup(tf.convert_to_tensor(['Hello', '.', '笑', '??', 'xxx'])) counts = sess.run(counts) np.testing.assert_array_equal(counts, [100, 200, 300, (- 1), (- 1)])
@six.add_metaclass(abc.ABCMeta)
class TrainingHook(tf.train.SessionRunHook, Configurable):
  """Abstract base class for training hooks."""

  def __init__(self, params, model_dir, run_config):
    tf.train.SessionRunHook.__init__(self)
    Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.TRAIN)
    self._model_dir = model_dir
    self._run_config = run_config

  @property
  def model_dir(self):
    """Returns the directory model checkpoints are written to."""
    return os.path.abspath(self._model_dir)

  @property
  def is_chief(self):
    """Returns true if and only if the current process is the chief.
    This is used for distributed training.
    """
    return self._run_config.is_chief

  @abstractstaticmethod
  def default_params():
    raise NotImplementedError()
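# A minimal sketch (not from the original codebase) of how a new hook would
# subclass TrainingHook: define default_params and override the standard
# tf.train.SessionRunHook callbacks. The class name and its single parameter
# are hypothetical.
class StepLoggerHook(TrainingHook):
  """Logs the global step every `log_every` steps."""

  def __init__(self, params, model_dir, run_config):
    super(StepLoggerHook, self).__init__(params, model_dir, run_config)
    self._global_step = None

  @staticmethod
  def default_params():
    return {'log_every': 100}

  def begin(self):
    self._global_step = tf.train.get_global_step()

  def before_run(self, _run_context):
    # Fetch the global step tensor on every run call.
    return tf.train.SessionRunArgs(self._global_step)

  def after_run(self, _run_context, run_values):
    step = run_values.results
    if step % self.params['log_every'] == 0:
      tf.logging.info('Reached global step %d', step)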
class MetadataCaptureHook(TrainingHook):
  """A hook to capture metadata for a single step.
  Useful for performance debugging. It performs a full trace and saves
  run_metadata and Chrome timeline information to a file.

  Args:
    step: The step number to trace. The hook is only enabled for this step.
  """

  def __init__(self, params, model_dir, run_config):
    super(MetadataCaptureHook, self).__init__(params, model_dir, run_config)
    self._active = False
    self._done = False
    self._global_step = None
    self._output_dir = os.path.abspath(self.model_dir)

  @staticmethod
  def default_params():
    return {'step': 10}

  def begin(self):
    self._global_step = tf.train.get_global_step()

  def before_run(self, _run_context):
    if not self.is_chief or self._done:
      return
    if not self._active:
      return tf.train.SessionRunArgs(self._global_step)
    else:
      tf.logging.info('Performing full trace on next step.')
      run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      return tf.train.SessionRunArgs(self._global_step, options=run_options)

  def after_run(self, _run_context, run_values):
    if not self.is_chief or self._done:
      return
    step_done = run_values.results
    if self._active:
      tf.logging.info('Captured full trace at step %s', step_done)
      gfile.MakeDirs(self._output_dir)
      trace_path = os.path.join(self._output_dir, 'run_meta')
      with gfile.GFile(trace_path, 'wb') as trace_file:
        trace_file.write(run_values.run_metadata.SerializeToString())
      tf.logging.info('Saved run_metadata to %s', trace_path)
      timeline_path = os.path.join(self._output_dir, 'timeline.json')
      with gfile.GFile(timeline_path, 'w') as timeline_file:
        tl_info = timeline.Timeline(run_values.run_metadata.step_stats)
        tl_chrome = tl_info.generate_chrome_trace_format(show_memory=True)
        timeline_file.write(tl_chrome)
      tf.logging.info('Saved timeline to %s', timeline_path)
      tf.contrib.tfprof.tfprof_logger.write_op_log(
          graph=tf.get_default_graph(),
          log_dir=self._output_dir,
          run_meta=run_values.run_metadata)
      tf.logging.info('Saved op log to %s', self._output_dir)
      self._active = False
      self._done = True
    self._active = (step_done >= self.params['step'])
class TrainSampleHook(TrainingHook): 'Occasionally samples predictions from the training run and prints them.\n\n Params:\n every_n_secs: Sample predictions every N seconds.\n If set, `every_n_steps` must be None.\n every_n_steps: Sample predictions every N steps.\n If set, `every_n_secs` must be None.\n sample_dir: Optional, a directory to write samples to.\n delimiter: Join tokens on this delimiter. Defaults to space.\n ' def __init__(self, params, model_dir, run_config): super(TrainSampleHook, self).__init__(params, model_dir, run_config) self._sample_dir = os.path.join(self.model_dir, 'samples') self._timer = SecondOrStepTimer(every_secs=self.params['every_n_secs'], every_steps=self.params['every_n_steps']) self._pred_dict = {} self._should_trigger = False self._iter_count = 0 self._global_step = None self._source_delimiter = self.params['source_delimiter'] self._target_delimiter = self.params['target_delimiter'] @staticmethod def default_params(): return {'every_n_secs': None, 'every_n_steps': 1000, 'source_delimiter': ' ', 'target_delimiter': ' '} def begin(self): self._iter_count = 0 self._global_step = tf.train.get_global_step() self._pred_dict = graph_utils.get_dict_from_collection('predictions') if (self._sample_dir is not None): gfile.MakeDirs(self._sample_dir) def before_run(self, _run_context): self._should_trigger = self._timer.should_trigger_for_step(self._iter_count) if self._should_trigger: fetches = {'predicted_tokens': self._pred_dict['predicted_tokens'], 'target_words': self._pred_dict['labels.target_tokens'], 'target_len': self._pred_dict['labels.target_len']} return tf.train.SessionRunArgs([fetches, self._global_step]) return tf.train.SessionRunArgs([{}, self._global_step]) def after_create_session(self, session, coord): print('Session created. Finalizing graph.') session.graph.finalize() def after_run(self, _run_context, run_values): (result_dict, step) = run_values.results self._iter_count = step if (not self._should_trigger): return None result_dicts = [dict(zip(result_dict, t)) for t in zip(*result_dict.values())] result_str = '' result_str += 'Prediction followed by Target @ Step {}\n'.format(step) result_str += (('=' * 100) + '\n') for result in result_dicts: target_len = result['target_len'] predicted_slice = result['predicted_tokens'][:(target_len - 1)] target_slice = result['target_words'][1:target_len] result_str += (self._target_delimiter.encode('utf-8').join(predicted_slice).decode('utf-8') + '\n') result_str += (self._target_delimiter.encode('utf-8').join(target_slice).decode('utf-8') + '\n\n') result_str += (('=' * 100) + '\n\n') tf.logging.info(result_str) if self._sample_dir: filepath = os.path.join(self._sample_dir, 'samples_{:06d}.txt'.format(step)) with gfile.GFile(filepath, 'w') as file: file.write(result_str) self._timer.update_last_triggered_step((self._iter_count - 1))
class PrintModelAnalysisHook(TrainingHook):
  """Writes the parameters of the model to a file and stdout."""

  def __init__(self, params, model_dir, run_config):
    super(PrintModelAnalysisHook, self).__init__(params, model_dir, run_config)
    self._filename = os.path.join(self.model_dir, 'model_analysis.txt')

  @staticmethod
  def default_params():
    return {}

  def begin(self):
    if self.is_chief:
      opts = tf.contrib.tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS
      opts['dump_to_file'] = os.path.abspath(self._filename)
      tf.contrib.tfprof.model_analyzer.print_model_analysis(
          tf.get_default_graph(), tfprof_options=opts)
      with gfile.GFile(self._filename) as file:
        tf.logging.info(file.read())
class VariableRestoreHook(TrainingHook):
  """A hook that restores variables from a given checkpoint.

  Params:
    prefix: Variables matching this prefix are restored.
    checkpoint_path: Path to the checkpoint to restore variables from.
  """

  def __init__(self, params, model_dir, run_config):
    super(VariableRestoreHook, self).__init__(params, model_dir, run_config)
    self._saver = None

  @staticmethod
  def default_params():
    return {'prefix': '', 'checkpoint_path': ''}

  def begin(self):
    variables = tf.contrib.framework.get_variables(scope=self.params['prefix'])

    def varname_in_checkpoint(name):
      """Removes the prefix from the variable name."""
      prefix_parts = self.params['prefix'].split('/')
      checkpoint_prefix = '/'.join(prefix_parts[:-1])
      return name.replace(checkpoint_prefix + '/', '')

    target_names = [varname_in_checkpoint(_.op.name) for _ in variables]
    restore_map = {k: v for k, v in zip(target_names, variables)}
    tf.logging.info('Restoring variables: \n%s',
                    yaml.dump({k: v.op.name for k, v in restore_map.items()}))
    self._saver = tf.train.Saver(restore_map)

  def after_create_session(self, session, coord):
    self._saver.restore(session, self.params['checkpoint_path'])
    tf.logging.info('Successfully restored all variables')
class DelayStartHook(TrainingHook, tf.train.GlobalStepWaiterHook):
  """Delays the start of the current worker process until global step
  K * task_id is reached. K is a parameter.
  """

  def __init__(self, params, model_dir, run_config):
    TrainingHook.__init__(self, params, model_dir, run_config)
    self._task_id = self._run_config.task_id
    self._delay_k = self.params['delay_k']
    self._wait_until_step = int(self._delay_k * self._task_id)
    tf.train.GlobalStepWaiterHook.__init__(self, self._wait_until_step)

  @staticmethod
  def default_params():
    return {'delay_k': 500}
class SyncReplicasOptimizerHook(TrainingHook): 'A SessionRunHook handles ops related to SyncReplicasOptimizer.' def __init__(self, params, model_dir, run_config): super(SyncReplicasOptimizerHook, self).__init__(params, model_dir, run_config) self._sync_optimizer = None self._num_tokens = (- 1) self._local_init_op = None self._ready_for_local_init_op = None self._q_runner = None self._init_tokens_op = None @staticmethod def default_params(): return {} def begin(self): if (global_vars.SYNC_REPLICAS_OPTIMIZER is not None): self._sync_optimizer = global_vars.SYNC_REPLICAS_OPTIMIZER else: return if (self._sync_optimizer._gradients_applied is False): raise ValueError('SyncReplicasOptimizer.apply_gradient should be called before using the hook.') if self.is_chief: self._local_init_op = self._sync_optimizer.chief_init_op self._ready_for_local_init_op = self._sync_optimizer.ready_for_local_init_op self._q_runner = self._sync_optimizer.get_chief_queue_runner() self._init_tokens_op = self._sync_optimizer.get_init_tokens_op(self._num_tokens) else: self._local_init_op = self._sync_optimizer.local_step_init_op self._ready_for_local_init_op = self._sync_optimizer.ready_for_local_init_op self._q_runner = None self._init_tokens_op = None def after_create_session(self, session, coord): 'Runs SyncReplicasOptimizer initialization ops.' if (not self._sync_optimizer): return tf.logging.info('Found SyncReplicasOptimizer. Initializing.') (local_init_success, msg) = session_manager._ready(self._ready_for_local_init_op, session, 'Model is not ready for SyncReplicasOptimizer local init.') if (not local_init_success): raise RuntimeError(('Init operations did not make model ready for SyncReplicasOptimizer local_init. Init op: %s, error: %s' % (self._local_init_op.name, msg))) session.run(self._local_init_op) if (self._init_tokens_op is not None): session.run(self._init_tokens_op) if (self._q_runner is not None): self._q_runner.create_threads(session, coord=coord, daemon=True, start=True)
class TrainOptions(object):
  """A collection of options that are passed to the training script
  and can be saved to perform inference later.

  Args:
    model_class: Name of the model class.
    model_params: A dictionary of parameters passed to the model.
  """

  def __init__(self, model_class, model_params):
    self._model_class = model_class
    self._model_params = model_params

  @property
  def model_class(self):
    """Returns the model class name"""
    return self._model_class

  @property
  def model_params(self):
    """Returns the model parameters"""
    return self._model_params

  @staticmethod
  def path(model_dir):
    """Returns the path to the options file.

    Args:
      model_dir: The model directory
    """
    return os.path.join(model_dir, 'train_options.json')

  def dump(self, model_dir):
    """Dumps the options to a file in the model directory.

    Args:
      model_dir: Path to the model directory. The options will be
        dumped into a file in this directory.
    """
    gfile.MakeDirs(model_dir)
    options_dict = {
        'model_class': self.model_class,
        'model_params': self.model_params
    }
    with gfile.GFile(TrainOptions.path(model_dir), 'wb') as file:
      file.write(json.dumps(options_dict).encode('utf-8'))

  @staticmethod
  def load(model_dir):
    """Loads options from the given model directory.

    Args:
      model_dir: Path to the model directory.
    """
    with gfile.GFile(TrainOptions.path(model_dir), 'rb') as file:
      options_dict = json.loads(file.read().decode('utf-8'))
    options_dict = defaultdict(None, options_dict)
    return TrainOptions(
        model_class=options_dict['model_class'],
        model_params=options_dict['model_params'])
def cell_from_spec(cell_classname, cell_params):
  """Create a RNN Cell instance from a class name and parameter dictionary.

  Args:
    cell_classname: Name of the cell class, e.g. "BasicLSTMCell".
    cell_params: A dictionary of parameters to pass to the cell constructor.

  Returns:
    A RNNCell instance.
  """
  cell_params = cell_params.copy()
  # Find the cell class, either by full import path or by name in the
  # rnn_cell module.
  cell_class = locate(cell_classname) or getattr(rnn_cell, cell_classname)
  # Make sure the additional arguments are valid cell constructor arguments.
  cell_args = set(inspect.getargspec(cell_class.__init__).args[1:])
  for key in cell_params.keys():
    if key not in cell_args:
      raise ValueError(
          '{} is not a valid argument for {} class. Available arguments '
          'are: {}'.format(key, cell_class.__name__, cell_args))
  return cell_class(**cell_params)
def get_rnn_cell(cell_class,
                 cell_params,
                 num_layers=1,
                 dropout_input_keep_prob=1.0,
                 dropout_output_keep_prob=1.0,
                 residual_connections=False,
                 residual_combiner='add',
                 residual_dense=False):
  """Creates a new RNN Cell

  Args:
    cell_class: Name of the cell class, e.g. "BasicLSTMCell".
    cell_params: A dictionary of parameters to pass to the cell constructor.
    num_layers: Number of layers. The cell will be wrapped with
      `tf.contrib.rnn.MultiRNNCell`
    dropout_input_keep_prob: Dropout keep probability applied
      to the input of cell *at each layer*
    dropout_output_keep_prob: Dropout keep probability applied
      to the output of cell *at each layer*
    residual_connections: If true, add residual connections
      between all cells

  Returns:
    An instance of `tf.contrib.rnn.RNNCell`.
  """
  cells = []
  for _ in range(num_layers):
    cell = cell_from_spec(cell_class, cell_params)
    if dropout_input_keep_prob < 1.0 or dropout_output_keep_prob < 1.0:
      cell = tf.contrib.rnn.DropoutWrapper(
          cell=cell,
          input_keep_prob=dropout_input_keep_prob,
          output_keep_prob=dropout_output_keep_prob)
    cells.append(cell)

  if len(cells) > 1:
    final_cell = rnn_cell.ExtendedMultiRNNCell(
        cells=cells,
        residual_connections=residual_connections,
        residual_combiner=residual_combiner,
        residual_dense=residual_dense)
  else:
    final_cell = cells[0]

  return final_cell
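# A short usage sketch, assuming the function above is available in the current
# module: build a 2-layer GRU stack with input dropout, in the same spirit as
# the TestGetRNNCell cases earlier in this listing.
cell = get_rnn_cell(
    cell_class='GRUCell',
    cell_params={'num_units': 64},
    num_layers=2,
    dropout_input_keep_prob=0.8)
print(cell.output_size)  # 64, the size of the last layer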
def create_learning_rate_decay_fn(decay_type,
                                  decay_steps,
                                  decay_rate,
                                  start_decay_at=0,
                                  stop_decay_at=1e9,
                                  min_learning_rate=None,
                                  staircase=False):
  """Creates a function that decays the learning rate.

  Args:
    decay_steps: How often to apply decay.
    decay_rate: A Python number. The decay rate.
    start_decay_at: Don't decay before this step
    stop_decay_at: Don't decay after this step
    min_learning_rate: Don't decay below this number
    decay_type: A decay function name defined in `tf.train`
    staircase: Whether to apply decay in a discrete staircase,
      as opposed to continuous, fashion.

  Returns:
    A function that takes (learning_rate, global_step) as inputs
    and returns the learning rate for the given step.
    Returns `None` if decay_type is empty or None.
  """
  if decay_type is None or decay_type == '':
    return None

  start_decay_at = tf.to_int32(start_decay_at)
  stop_decay_at = tf.to_int32(stop_decay_at)

  def decay_fn(learning_rate, global_step):
    """The computed learning rate decay function."""
    global_step = tf.to_int32(global_step)

    decay_type_fn = getattr(tf.train, decay_type)
    decayed_learning_rate = decay_type_fn(
        learning_rate=learning_rate,
        global_step=tf.minimum(global_step, stop_decay_at) - start_decay_at,
        decay_steps=decay_steps,
        decay_rate=decay_rate,
        staircase=staircase,
        name='decayed_learning_rate')

    final_lr = tf.train.piecewise_constant(
        x=global_step,
        boundaries=[start_decay_at],
        values=[learning_rate, decayed_learning_rate])

    if min_learning_rate:
      final_lr = tf.maximum(final_lr, min_learning_rate)

    return final_lr

  return decay_fn
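# A minimal sketch of the schedule this function produces, mirroring the
# numbers checked in TestLRDecay earlier: exponential decay starting at step
# 100, applied every 10 steps with rate 0.9.
decay_fn = create_learning_rate_decay_fn(
    decay_type='exponential_decay',
    decay_steps=10,
    decay_rate=0.9,
    start_decay_at=100,
    stop_decay_at=1000)
with tf.Session() as sess:
  print(sess.run(decay_fn(1.0, 50)))   # 1.0, since step 50 < start_decay_at
  print(sess.run(decay_fn(1.0, 115)))  # 1.0 * 0.9 ** (15 / 10) ~= 0.854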
def create_input_fn(pipeline,
                    batch_size,
                    bucket_boundaries=None,
                    allow_smaller_final_batch=False,
                    scope=None):
  """Creates an input function that can be used with tf.learn estimators.
  Note that you must pass "factory functions" for both the data provider and
  featurizer to ensure that everything will be created in the same graph.

  Args:
    pipeline: An instance of `seq2seq.data.InputPipeline`.
    batch_size: Create batches of this size. A queue to hold a
      reasonable number of batches in memory is created.
    bucket_boundaries: int list, increasing non-negative numbers.
      If None, no bucketing is performed.

  Returns:
    An input function that returns `(feature_batch, labels_batch)`
    tuples when called.
  """

  def input_fn():
    """Creates features and labels."""
    with tf.variable_scope(scope or 'input_fn'):
      data_provider = pipeline.make_data_provider()
      features_and_labels = pipeline.read_from_data_provider(data_provider)

      if bucket_boundaries:
        _, batch = tf.contrib.training.bucket_by_sequence_length(
            input_length=features_and_labels['source_len'],
            bucket_boundaries=bucket_boundaries,
            tensors=features_and_labels,
            batch_size=batch_size,
            keep_input=features_and_labels['source_len'] >= 1,
            dynamic_pad=True,
            capacity=5000 + 16 * batch_size,
            allow_smaller_final_batch=allow_smaller_final_batch,
            name='bucket_queue')
      else:
        batch = tf.train.batch(
            tensors=features_and_labels,
            enqueue_many=False,
            batch_size=batch_size,
            dynamic_pad=True,
            capacity=5000 + 16 * batch_size,
            allow_smaller_final_batch=allow_smaller_final_batch,
            name='batch_queue')

      features_batch = {k: batch[k] for k in pipeline.feature_keys}
      if set(batch.keys()).intersection(pipeline.label_keys):
        labels_batch = {k: batch[k] for k in pipeline.label_keys}
      else:
        labels_batch = None

      return features_batch, labels_batch

  return input_fn
class DBEngine():

  def __init__(self, fdb):
    self.db = records.Database('sqlite:///{}'.format(fdb))

  def execute_query(self, table_id, query, *args, **kwargs):
    return self.execute(table_id, query.sel_index, query.agg_index,
                        query.conditions, *args, **kwargs)

  def execute(self, table_id, select_index, aggregation_index, conditions,
              lower=True):
    if not table_id.startswith('table'):
      table_id = 'table_{}'.format(table_id.replace('-', '_'))
    # Read the CREATE TABLE statement to recover the column types.
    table_info = self.db.query(
        'SELECT sql from sqlite_master WHERE tbl_name = :name',
        name=table_id).all()[0].sql.replace('\n', '')
    schema_str = schema_re.findall(table_info)[0]
    schema = {}
    for tup in schema_str.split(', '):
      c, t = tup.split()
      schema[c] = t
    select = 'col{}'.format(select_index)
    agg = agg_ops[aggregation_index]
    if agg:
      select = '{}({})'.format(agg, select)
    where_clause = []
    where_map = {}
    for col_index, op, val in conditions:
      # NOTE: `unicode` only exists under Python 2; under Python 3 `str`
      # already covers this case.
      if lower and (isinstance(val, str) or isinstance(val, unicode)):
        val = val.lower()
      if schema['col{}'.format(col_index)] == 'real' and not isinstance(
          val, (int, float)):
        try:
          val = float(parse_decimal(val))
        except NumberFormatError as e:
          val = float(num_re.findall(val)[0])
      where_clause.append('col{} {} :col{}'.format(col_index, cond_ops[op],
                                                   col_index))
      where_map['col{}'.format(col_index)] = val
    where_str = ''
    if where_clause:
      where_str = 'WHERE ' + ' AND '.join(where_clause)
    query = 'SELECT {} AS result FROM {} {}'.format(select, table_id, where_str)
    out = self.db.query(query, **where_map)
    return [o.result for o in out]
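# A minimal usage sketch, assuming a WikiSQL-style SQLite database. The file
# path, table id, and condition value below are hypothetical; `conditions` is
# a list of (column_index, operator_index, value) triples, where the operator
# index selects from the module-level `cond_ops` list used by `execute` above.
engine = DBEngine('data/dev.db')
result = engine.execute(
    table_id='1-10015132-11',
    select_index=0,
    aggregation_index=0,          # 0 means no aggregation in agg_ops
    conditions=[(3, 0, 'Toronto')])
print(result)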
def refractor_db_data(data_, table):
  rec = {}
  for table_name in table['table_names']:
    rec[table_name] = []
  for entry in table['column_names']:
    if entry[0] < 0:
      continue
    this_table = table['table_names'][entry[0]]
    rec[this_table].append(entry[1])
  res = {}
  for k, v in data_.items():
    this_table = k.lower().replace('_', ' ')
    if this_table not in rec:
      continue
    for row in v:
      for idx, val in enumerate(row):
        if val is None:
          continue
        if type(val) is str and len(val) > 0:
          if idx < len(rec[this_table]):
            res[val.lower()] = (rec[this_table][idx].split(),
                                this_table.split())
          continue
        if idx < len(rec[this_table]):
          res[str(val)] = (rec[this_table][idx].split(), this_table.split())
  return res
def toksEQ(toks1, toks2):
  str1 = ' '.join([wordnet_lemmatizer.lemmatize(tok) for tok in toks1])
  str2 = ' '.join([wordnet_lemmatizer.lemmatize(tok) for tok in toks2])
  return str1 == str2
def isNumber(val):
  try:
    int(val)
    return True
  except:
    return False
def group_header(toks, idx, num_toks, header_toks):
  for endIdx in reversed(range(idx + 1, num_toks + 1)):
    sub_toks = toks[idx:endIdx]
    if sub_toks in header_toks:
      return (endIdx, sub_toks)
  return (idx, None)
def group_val(toks, idx, num_toks, db_data):
  for endIdx in reversed(range(idx + 1, num_toks + 1)):
    sub_toks = toks[idx:endIdx]
    key = ' '.join(sub_toks)
    if key in db_data:
      return (endIdx, sub_toks)
  return (idx, None)
def group_table(toks, idx, num_toks, table_names):
  table_toks = [name.split() for name in table_names]
  for endIdx in reversed(range(idx + 1, num_toks + 1)):
    sub_toks = toks[idx:endIdx]
    if sub_toks in table_toks:
      return (endIdx, sub_toks)
  return (idx, None)
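# A small worked example (toy data, not from any dataset) of how the grouping
# helpers above scan a lemmatized question left to right: group_header returns
# the end index of the longest token span that matches a column-name token
# list, or (idx, None) when nothing matches.
header_toks = [['name'], ['head', 'of', 'department'], ['age']]
toks = 'what is the age of the head of department'.split()
print(group_header(toks, 3, len(toks), header_toks))  # (4, ['age'])
print(group_header(toks, 6, len(toks), header_toks))  # (9, ['head', 'of', 'department'])
print(group_header(toks, 0, len(toks), header_toks))  # (0, None)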
def parse_entry(entry, tables_dict, db_data_dict): table = tables_dict[entry['db_id']] db_data = {} if (entry['db_id'] in db_data_dict): db_data = db_data_dict[entry['db_id']] question_toks = [tok.lower() for tok in entry['question_toks']] question_toks_lem = [wordnet_lemmatizer.lemmatize(t) for t in question_toks] header_toks = [] col2table = {} for col in table['column_names']: this_header_tok = col[1].split() header_toks.append(this_header_tok) if (col[0] < 0): col2table[' '.join(this_header_tok)] = ['all'] else: col2table[' '.join(this_header_tok)] = table['table_names'][col[0]].split() idx = 0 num_toks = len(question_toks) tok_concol = [] type_concol = [] while (idx < num_toks): (end_idx, tname) = group_table(question_toks_lem, idx, num_toks, table['table_names']) if tname: tok_concol.append(question_toks[idx:end_idx]) type_concol.append(['table']) idx = end_idx continue (end_idx, header) = group_header(question_toks_lem, idx, num_toks, header_toks) if header: tok_concol.append(question_toks[idx:end_idx]) type_concol.append(col2table[' '.join(header)]) idx = end_idx continue (end_idx, val) = group_val(question_toks_lem, idx, num_toks, db_data) if val: tok_concol.append(question_toks[idx:end_idx]) (col, table_) = db_data[' '.join(val)] if isNumber(' '.join(val)): type_concol.append(((col + table_) + ['number'])) else: type_concol.append((col + table_)) idx = end_idx continue tok_concol.append([question_toks[idx]]) type_concol.append([NONE]) idx += 1 entry[TOK] = tok_concol entry[TYPE] = type_concol return entry
def isNumber(val):
  try:
    int(val)
    return True
  except:
    return False
def group_table(toks, idx, num_toks, table_names):
  table_toks = [name.split() for name in table_names]
  for endIdx in reversed(range(idx + 1, num_toks + 1)):
    sub_toks = toks[idx:endIdx]
    if sub_toks in table_toks:
      return (endIdx, sub_toks)
  return (idx, None)
def get_wikitable_name(table):
  if 'page_title' in table:
    return table['page_title'].lower().split()
  if 'section_title' in table:
    return table['section_title'].lower().split()
  if 'caption' in table:
    return table['caption'].lower().split()
  return [DEFAULT_TABLENAME]
def parse_processed_wiki(entry):
    table = wikitables_dict[entry['db_id']]
    parsed_wikisql = wiki_dict[entry['question'].lower()]
    entry.update(parsed_wikisql)
    headers = table[HEADER]
    table_name = get_wikitable_name(table)
    for i in range(len(entry[TYPE])):
        if entry[TYPE][i] == ['column']:
            entry[TYPE][i] = table_name
        elif entry[TYPE][i] in headers:
            entry[TYPE][i] += table_name
    return entry
def group_header(toks, idx, num_toks, header_toks):
    for endIdx in reversed(range(idx + 1, num_toks + 1)):
        sub_toks = toks[idx:endIdx]
        if sub_toks in header_toks:
            return endIdx, sub_toks
    return idx, None

def group_val(toks, idx, num_toks, val2col):
    if toks[idx] in val2col:
        return idx + 1, val2col[toks[idx]]
    return idx, None
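# Note: unlike the Spider-side group_val above, this WikiSQL-side variant only
# does a single-token exact lookup in val2col and returns the mapped column
# directly rather than the matched token span.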
class DBEngine:
    def __init__(self, fdb):
        self.db = records.Database('sqlite:///{}'.format(fdb))

    def execute_query(self, table_id, query, *args, **kwargs):
        return self.execute(table_id, query.sel_index, query.agg_index, query.conditions, *args, **kwargs)

    def execute(self, table_id, select_index, aggregation_index, conditions, lower=True):
        if not table_id.startswith('table'):
            table_id = 'table_{}'.format(table_id.replace('-', '_'))
        table_info = self.db.query('SELECT sql from sqlite_master WHERE tbl_name = :name',
                                   name=table_id).all()[0].sql.replace('\n', '')
        schema_str = schema_re.findall(table_info)[0]
        schema = {}
        for tup in schema_str.split(', '):
            c, t = tup.split()
            schema[c] = t
        select = 'col{}'.format(select_index)
        agg = agg_ops[aggregation_index]
        if agg:
            select = '{}({})'.format(agg, select)
        where_clause = []
        where_map = {}
        for col_index, op, val in conditions:
            if lower and isinstance(val, str):  # the original also accepted Python 2 `unicode`
                val = val.lower()
            if schema['col{}'.format(col_index)] == 'real' and not isinstance(val, (int, float)):
                try:
                    val = float(parse_decimal(val))
                except NumberFormatError:
                    val = float(num_re.findall(val)[0])
            where_clause.append('col{} {} :col{}'.format(col_index, cond_ops[op], col_index))
            where_map['col{}'.format(col_index)] = val
        where_str = ''
        if where_clause:
            where_str = 'WHERE ' + ' AND '.join(where_clause)
        query = 'SELECT {} AS result FROM {} {}'.format(select, table_id, where_str)
        out = self.db.query(query, **where_map)
        return [o.result for o in out]
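# Illustrative usage sketch (hypothetical file and table id -- DBEngine needs a
# real WikiSQL-style SQLite database on disk, so this is left commented out):
# engine = DBEngine('data/wikisql/dev.db')
# rows = engine.execute('1-10015132-11', select_index=0, aggregation_index=0,
#                       conditions=[(5, 0, 'some value')])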
def condition_has_or(conds):
    return 'or' in conds[1::2]

def condition_has_like(conds):
    return WHERE_OPS.index('like') in [cond_unit[1] for cond_unit in conds[::2]]

def condition_has_sql(conds):
    for cond_unit in conds[::2]:
        val1, val2 = cond_unit[3], cond_unit[4]
        if val1 is not None and type(val1) is dict:
            return True
        if val2 is not None and type(val2) is dict:
            return True
    return False

def val_has_op(val_unit):
    return val_unit[0] != UNIT_OPS.index('none')

def has_agg(unit):
    return unit[0] != AGG_OPS.index('none')
def accuracy(count, total):
    if count == total:
        return 1
    return 0

def recall(count, total):
    if count == total:
        return 1
    return 0

def F1(acc, rec):
    if acc + rec == 0:
        return 0
    return (2.0 * acc * rec) / (acc + rec)

def get_scores(count, pred_total, label_total):
    if pred_total != label_total:
        return 0, 0, 0
    elif count == pred_total:
        return 1, 1, 1
    return 0, 0, 0
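# Illustrative sketch: component scoring is all-or-nothing -- full credit only
# when prediction and gold have the same number of units and every predicted
# unit is matched; anything less scores zero on acc, rec and f1 alike.
def _example_get_scores():
    assert get_scores(count=2, pred_total=2, label_total=2) == (1, 1, 1)
    assert get_scores(count=1, pred_total=2, label_total=2) == (0, 0, 0)
    assert get_scores(count=1, pred_total=1, label_total=2) == (0, 0, 0)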
def eval_sel(pred, label):
    pred_sel = pred['select'][1]
    label_sel = label['select'][1]
    label_wo_agg = [unit[1] for unit in label_sel]
    pred_total = len(pred_sel)
    label_total = len(label_sel)
    cnt = 0
    cnt_wo_agg = 0
    for unit in pred_sel:
        if unit in label_sel:
            cnt += 1
            label_sel.remove(unit)
        if unit[1] in label_wo_agg:
            cnt_wo_agg += 1
            label_wo_agg.remove(unit[1])
    return label_total, pred_total, cnt, cnt_wo_agg

def eval_where(pred, label):
    pred_conds = [unit for unit in pred['where'][::2]]
    label_conds = [unit for unit in label['where'][::2]]
    label_wo_agg = [unit[2] for unit in label_conds]
    pred_total = len(pred_conds)
    label_total = len(label_conds)
    cnt = 0
    cnt_wo_agg = 0
    for unit in pred_conds:
        if unit in label_conds:
            cnt += 1
            label_conds.remove(unit)
        if unit[2] in label_wo_agg:
            cnt_wo_agg += 1
            label_wo_agg.remove(unit[2])
    return label_total, pred_total, cnt, cnt_wo_agg

def eval_group(pred, label):
    pred_cols = [unit[1] for unit in pred['groupBy']]
    label_cols = [unit[1] for unit in label['groupBy']]
    pred_total = len(pred_cols)
    label_total = len(label_cols)
    cnt = 0
    pred_cols = [pred.split('.')[1] if '.' in pred else pred for pred in pred_cols]
    label_cols = [label.split('.')[1] if '.' in label else label for label in label_cols]
    for col in pred_cols:
        if col in label_cols:
            cnt += 1
            label_cols.remove(col)
    return label_total, pred_total, cnt

def eval_having(pred, label):
    pred_total = label_total = cnt = 0
    if len(pred['groupBy']) > 0:
        pred_total = 1
    if len(label['groupBy']) > 0:
        label_total = 1
    pred_cols = [unit[1] for unit in pred['groupBy']]
    label_cols = [unit[1] for unit in label['groupBy']]
    if pred_total == label_total == 1 and pred_cols == label_cols and pred['having'] == label['having']:
        cnt = 1
    return label_total, pred_total, cnt

def eval_order(pred, label):
    pred_total = label_total = cnt = 0
    if len(pred['orderBy']) > 0:
        pred_total = 1
    if len(label['orderBy']) > 0:
        label_total = 1
    if (len(label['orderBy']) > 0 and pred['orderBy'] == label['orderBy'] and
            ((pred['limit'] is None and label['limit'] is None) or
             (pred['limit'] is not None and label['limit'] is not None))):
        cnt = 1
    return label_total, pred_total, cnt

def eval_and_or(pred, label):
    pred_ao = pred['where'][1::2]
    label_ao = label['where'][1::2]
    pred_ao = set(pred_ao)
    label_ao = set(label_ao)
    if pred_ao == label_ao:
        return 1, 1, 1
    return len(pred_ao), len(label_ao), 0
def get_nestedSQL(sql):
    nested = []
    for cond_unit in sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]:
        if type(cond_unit[3]) is dict:
            nested.append(cond_unit[3])
        if type(cond_unit[4]) is dict:
            nested.append(cond_unit[4])
    if sql['intersect'] is not None:
        nested.append(sql['intersect'])
    if sql['except'] is not None:
        nested.append(sql['except'])
    if sql['union'] is not None:
        nested.append(sql['union'])
    return nested

def eval_nested(pred, label):
    label_total = 0
    pred_total = 0
    cnt = 0
    if pred is not None:
        pred_total += 1
    if label is not None:
        label_total += 1
    if pred is not None and label is not None:
        partial_scores = Evaluator.eval_partial_match(pred, label)
        cnt += Evaluator.eval_exact_match(pred, label, partial_scores)
    return label_total, pred_total, cnt

def eval_IUEN(pred, label):
    lt1, pt1, cnt1 = eval_nested(pred['intersect'], label['intersect'])
    lt2, pt2, cnt2 = eval_nested(pred['except'], label['except'])
    lt3, pt3, cnt3 = eval_nested(pred['union'], label['union'])
    label_total = lt1 + lt2 + lt3
    pred_total = pt1 + pt2 + pt3
    cnt = cnt1 + cnt2 + cnt3
    return label_total, pred_total, cnt

def get_keywords(sql):
    res = set()
    if len(sql['where']) > 0:
        res.add('where')
    if len(sql['groupBy']) > 0:
        res.add('group')
    if len(sql['having']) > 0:
        res.add('having')
    if len(sql['orderBy']) > 0:
        res.add(sql['orderBy'][0])
        res.add('order')
    if sql['limit'] is not None:
        res.add('limit')
    if sql['except'] is not None:
        res.add('except')
    if sql['union'] is not None:
        res.add('union')
    if sql['intersect'] is not None:
        res.add('intersect')
    ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    if len([token for token in ao if token == 'or']) > 0:
        res.add('or')
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    if len([cond_unit for cond_unit in cond_units if cond_unit[0]]) > 0:
        res.add('not')
    if len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('in')]) > 0:
        res.add('in')
    if len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like')]) > 0:
        res.add('like')
    return res

def eval_keywords(pred, label):
    pred_keywords = get_keywords(pred)
    label_keywords = get_keywords(label)
    pred_total = len(pred_keywords)
    label_total = len(label_keywords)
    cnt = 0
    for k in pred_keywords:
        if k in label_keywords:
            cnt += 1
    return label_total, pred_total, cnt
def count_agg(units):
    return len([unit for unit in units if has_agg(unit)])

def count_component1(sql):
    count = 0
    if len(sql['where']) > 0:
        count += 1
    if len(sql['groupBy']) > 0:
        count += 1
    if len(sql['orderBy']) > 0:
        count += 1
    if sql['limit'] is not None:
        count += 1
    if len(sql['from']['table_units']) > 0:
        count += len(sql['from']['table_units']) - 1
    ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
    count += len([token for token in ao if token == 'or'])
    cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
    count += len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like')])
    return count

def count_component2(sql):
    nested = get_nestedSQL(sql)
    return len(nested)

def count_others(sql):
    count = 0
    agg_count = count_agg(sql['select'][1])
    agg_count += count_agg(sql['where'][::2])
    agg_count += count_agg(sql['groupBy'])
    if len(sql['orderBy']) > 0:
        agg_count += count_agg([unit[1] for unit in sql['orderBy'][1] if unit[1]] +
                               [unit[2] for unit in sql['orderBy'][1] if unit[2]])
    agg_count += count_agg(sql['having'])
    if agg_count > 1:
        count += 1
    if len(sql['select'][1]) > 1:
        count += 1
    if len(sql['where']) > 1:
        count += 1
    if len(sql['groupBy']) > 1:
        count += 1
    return count
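# The three counters above feed Evaluator.eval_hardness below: a query with at
# most one component-1 item, no extra "others" and no nested SQL is rated
# "easy", and growing counts move it through "medium", "hard" and "extra".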
class Evaluator:
    """A simple evaluator"""

    def __init__(self, db_dir, kmaps, etype):
        self.db_dir = db_dir
        self.kmaps = kmaps
        self.etype = etype
        self.db_paths = {}
        self.schemas = {}
        for db_name in self.kmaps.keys():
            db_path = os.path.join(db_dir, db_name, db_name + '.sqlite')
            self.db_paths[db_name] = db_path
            self.schemas[db_name] = Schema(get_schema(db_path))
        self.scores = {
            level: {
                'count': 0,
                'partial': {type_: {'acc': 0.0, 'rec': 0.0, 'f1': 0.0, 'acc_count': 0, 'rec_count': 0}
                            for type_ in PARTIAL_TYPES},
                'exact': 0.0,
                'exec': 0,
            }
            for level in LEVELS
        }

    def eval_hardness(self, sql):
        count_comp1_ = count_component1(sql)
        count_comp2_ = count_component2(sql)
        count_others_ = count_others(sql)
        if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
            return 'easy'
        elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \
                (count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):
            return 'medium'
        elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \
                (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \
                (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):
            return 'hard'
        else:
            return 'extra'

    @classmethod
    def eval_exact_match(cls, pred, label, partial_scores):
        for _, score in list(partial_scores.items()):
            if score['f1'] != 1:
                return 0
        if len(label['from']['table_units']) > 0:
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            return label_tables == pred_tables
        return 1

    @classmethod
    def eval_partial_match(cls, pred, label):
        res = {}

        label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,
                         'label_total': label_total, 'pred_total': pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,
                                 'label_total': label_total, 'pred_total': pred_total}

        label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,
                        'label_total': label_total, 'pred_total': pred_total}
        acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
        res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,
                               'label_total': label_total, 'pred_total': pred_total}

        label_total, pred_total, cnt = eval_group(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,
                                   'label_total': label_total, 'pred_total': pred_total}

        label_total, pred_total, cnt = eval_having(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,
                        'label_total': label_total, 'pred_total': pred_total}

        label_total, pred_total, cnt = eval_order(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,
                        'label_total': label_total, 'pred_total': pred_total}

        label_total, pred_total, cnt = eval_and_or(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,
                         'label_total': label_total, 'pred_total': pred_total}

        label_total, pred_total, cnt = eval_IUEN(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,
                       'label_total': label_total, 'pred_total': pred_total}

        label_total, pred_total, cnt = eval_keywords(pred, label)
        acc, rec, f1 = get_scores(cnt, pred_total, label_total)
        res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,
                           'label_total': label_total, 'pred_total': pred_total}
        return res

    def evaluate_one(self, db_name, gold, predicted):
        schema = self.schemas[db_name]
        g_sql = get_sql(schema, gold)
        hardness = self.eval_hardness(g_sql)
        self.scores[hardness]['count'] += 1
        self.scores['all']['count'] += 1

        parse_error = False
        try:
            p_sql = get_sql(schema, predicted)
        except Exception:
            # If the predicted SQL cannot be parsed, score it against an empty query.
            p_sql = {
                'except': None,
                'from': {'conds': [], 'table_units': []},
                'groupBy': [],
                'having': [],
                'intersect': None,
                'limit': None,
                'orderBy': [],
                'select': [False, []],
                'union': None,
                'where': [],
            }
            parse_error = True

        # Rebuild SQL for value and foreign-key column evaluation.
        kmap = self.kmaps[db_name]
        g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
        g_sql = rebuild_sql_val(g_sql)
        g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
        p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
        p_sql = rebuild_sql_val(p_sql)
        p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)

        if self.etype in ['all', 'exec']:
            self.scores[hardness]['exec'] += eval_exec_match(self.db_paths[db_name], predicted, gold, p_sql, g_sql)

        if self.etype in ['all', 'match']:
            partial_scores = self.eval_partial_match(p_sql, g_sql)
            exact_score = self.eval_exact_match(p_sql, g_sql, partial_scores)
            self.scores[hardness]['exact'] += exact_score
            self.scores['all']['exact'] += exact_score
            for type_ in PARTIAL_TYPES:
                if partial_scores[type_]['pred_total'] > 0:
                    self.scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    self.scores[hardness]['partial'][type_]['acc_count'] += 1
                if partial_scores[type_]['label_total'] > 0:
                    self.scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    self.scores[hardness]['partial'][type_]['rec_count'] += 1
                self.scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
                if partial_scores[type_]['pred_total'] > 0:
                    self.scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
                    self.scores['all']['partial'][type_]['acc_count'] += 1
                if partial_scores[type_]['label_total'] > 0:
                    self.scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
                    self.scores['all']['partial'][type_]['rec_count'] += 1
                self.scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']

        return {
            'predicted': predicted,
            'gold': gold,
            'predicted_parse_error': parse_error,
            'hardness': hardness,
            'exact': exact_score,
            'partial': partial_scores,
        }

    def finalize(self):
        scores = self.scores
        for level in LEVELS:
            if scores[level]['count'] == 0:
                continue
            if self.etype in ['all', 'exec']:
                scores[level]['exec'] /= scores[level]['count']
            if self.etype in ['all', 'match']:
                scores[level]['exact'] /= scores[level]['count']
                for type_ in PARTIAL_TYPES:
                    if scores[level]['partial'][type_]['acc_count'] == 0:
                        scores[level]['partial'][type_]['acc'] = 0
                    else:
                        scores[level]['partial'][type_]['acc'] = \
                            scores[level]['partial'][type_]['acc'] / scores[level]['partial'][type_]['acc_count'] * 1.0
                    if scores[level]['partial'][type_]['rec_count'] == 0:
                        scores[level]['partial'][type_]['rec'] = 0
                    else:
                        scores[level]['partial'][type_]['rec'] = \
                            scores[level]['partial'][type_]['rec'] / scores[level]['partial'][type_]['rec_count'] * 1.0
                    if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
                        scores[level]['partial'][type_]['f1'] = 1
                    else:
                        scores[level]['partial'][type_]['f1'] = \
                            2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / \
                            (scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
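# Illustrative usage sketch (hypothetical paths; `kmaps` maps each db_id to its
# foreign-key column map and is assumed to be built elsewhere):
# evaluator = Evaluator('data/spider/database', kmaps, etype='match')
# result = evaluator.evaluate_one('concert_singer',
#                                 'SELECT count(*) FROM singer',
#                                 'SELECT count(*) FROM singer')
# evaluator.finalize()
# print_scores(evaluator.scores, 'match')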
def isValidSQL(sql, db):
    conn = sqlite3.connect(db)
    cursor = conn.cursor()
    try:
        cursor.execute(sql)
    except Exception:
        return False
    return True

def print_scores(scores, etype):
    LEVELS = ['easy', 'medium', 'hard', 'extra', 'all']
    PARTIAL_TYPES = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']
    print('{:20} {:20} {:20} {:20} {:20} {:20}'.format('', *LEVELS))
    counts = [scores[level]['count'] for level in LEVELS]
    print('{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}'.format('count', *counts))

    if etype in ['all', 'exec']:
        print('===================== EXECUTION ACCURACY =====================')
        this_scores = [scores[level]['exec'] for level in LEVELS]
        print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format('execution', *this_scores))

    if etype in ['all', 'match']:
        print('\n====================== EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[level]['exact'] for level in LEVELS]
        print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format('exact match', *exact_scores))
        print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in PARTIAL_TYPES:
            this_scores = [scores[level]['partial'][type_]['acc'] for level in LEVELS]
            print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in PARTIAL_TYPES:
            this_scores = [scores[level]['partial'][type_]['rec'] for level in LEVELS]
            print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format(type_, *this_scores))
        print('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in PARTIAL_TYPES:
            this_scores = [scores[level]['partial'][type_]['f1'] for level in LEVELS]
            print('{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}'.format(type_, *this_scores))
def evaluate(gold, predict, db_dir, etype, kmaps):
    with open(gold) as f:
        glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    with open(predict) as f:
        plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
    evaluator = Evaluator(db_dir, kmaps, etype)
    results = []
    for p, g in zip(plist, glist):
        (predicted,) = p
        gold, db_name = g
        results.append(evaluator.evaluate_one(db_name, gold, predicted))
    evaluator.finalize()
    print_scores(evaluator.scores, etype)
    return {'per_item': results, 'total_scores': evaluator.scores}
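# File format implied by evaluate(): the gold file has one "<gold SQL>\t<db_id>"
# per line, the prediction file has one predicted SQL string per line, and the
# two files are aligned line by line. Example call (hypothetical paths):
# evaluate('gold.txt', 'pred.txt', 'data/spider/database', 'all', kmaps)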
def eval_exec_match(db, p_str, g_str, pred, gold):
    """Return True if the values produced by the predicted and gold queries match
    at the corresponding indices. Multiple col_unit pairs are currently not supported."""
    conn = sqlite3.connect(db)
    cursor = conn.cursor()
    try:
        cursor.execute(p_str)
        p_res = cursor.fetchall()
    except Exception:
        return False
    cursor.execute(g_str)
    q_res = cursor.fetchall()

    def res_map(res, val_units):
        rmap = {}
        for idx, val_unit in enumerate(val_units):
            key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
            rmap[key] = [r[idx] for r in res]
        return rmap

    p_val_units = [unit[1] for unit in pred['select'][1]]
    q_val_units = [unit[1] for unit in gold['select'][1]]
    return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
def rebuild_cond_unit_val(cond_unit):
    if cond_unit is None or not DISABLE_VALUE:
        return cond_unit
    not_op, op_id, val_unit, val1, val2 = cond_unit
    if type(val1) is not dict:
        val1 = None
    else:
        val1 = rebuild_sql_val(val1)
    if type(val2) is not dict:
        val2 = None
    else:
        val2 = rebuild_sql_val(val2)
    return not_op, op_id, val_unit, val1, val2

def rebuild_condition_val(condition):
    if condition is None or not DISABLE_VALUE:
        return condition
    res = []
    for idx, it in enumerate(condition):
        if idx % 2 == 0:
            res.append(rebuild_cond_unit_val(it))
        else:
            res.append(it)
    return res

def rebuild_sql_val(sql):
    if sql is None or not DISABLE_VALUE:
        return sql
    sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
    sql['having'] = rebuild_condition_val(sql['having'])
    sql['where'] = rebuild_condition_val(sql['where'])
    sql['intersect'] = rebuild_sql_val(sql['intersect'])
    sql['except'] = rebuild_sql_val(sql['except'])
    sql['union'] = rebuild_sql_val(sql['union'])
    return sql
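# When DISABLE_VALUE is set, the rebuild_*_val helpers above replace every
# literal comparison value with None (keeping only nested sub-queries), so the
# matching metrics ignore the concrete values used in WHERE/HAVING conditions.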
def build_valid_col_units(table_units, schema):
    col_ids = [table_unit[1] for table_unit in table_units if table_unit[0] == TABLE_TYPE['table_unit']]
    prefixs = [col_id[:-2] for col_id in col_ids]
    valid_col_units = []
    for value in list(schema.idMap.values()):
        if '.' in value and value[:value.index('.')] in prefixs:
            valid_col_units.append(value)
    return valid_col_units

def rebuild_col_unit_col(valid_col_units, col_unit, kmap):
    if col_unit is None:
        return col_unit
    agg_id, col_id, distinct = col_unit
    if col_id in kmap and col_id in valid_col_units:
        col_id = kmap[col_id]
    if DISABLE_DISTINCT:
        distinct = None
    return agg_id, col_id, distinct

def rebuild_val_unit_col(valid_col_units, val_unit, kmap):
    if val_unit is None:
        return val_unit
    unit_op, col_unit1, col_unit2 = val_unit
    col_unit1 = rebuild_col_unit_col(valid_col_units, col_unit1, kmap)
    col_unit2 = rebuild_col_unit_col(valid_col_units, col_unit2, kmap)
    return unit_op, col_unit1, col_unit2

def rebuild_table_unit_col(valid_col_units, table_unit, kmap):
    if table_unit is None:
        return table_unit
    table_type, col_unit_or_sql = table_unit
    if isinstance(col_unit_or_sql, tuple):
        col_unit_or_sql = rebuild_col_unit_col(valid_col_units, col_unit_or_sql, kmap)
    return table_type, col_unit_or_sql
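# Illustrative sketch (made-up column ids): rebuild_col_unit_col canonicalizes
# columns through kmap, so queries that reference either side of a foreign-key
# link compare as equal during matching.
def _example_rebuild_col_unit():
    kmap = {'__singer.concert_id__': '__concert.id__'}
    valid = ['__singer.concert_id__']
    agg_id, col_id, _ = rebuild_col_unit_col(valid, (0, '__singer.concert_id__', False), kmap)
    assert col_id == '__concert.id__'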