{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq_model.py","language":"python","identifier":"Seq2SeqModel.__init__","parameters":"(self,\n source_vocab_size,\n target_vocab_size,\n buckets,\n size,\n num_layers,\n latent_dim,\n max_gradient_norm,\n batch_size,\n learning_rate,\n kl_min=2,\n word_dropout_keep_prob=1.0,\n anneal=False,\n kl_rate_rise_factor=None,\n use_lstm=False,\n num_samples=512,\n optimizer=None,\n activation=tf.nn.relu,\n forward_only=False,\n feed_previous=True,\n bidirectional=False,\n weight_initializer=None,\n bias_initializer=None,\n iaf=False,\n dtype=tf.float32)","argument_list":"","return_statement":"","docstring":"Create the model.\n\n Args:\n source_vocab_size: size of the source vocabulary.\n target_vocab_size: size of the target vocabulary.\n buckets: a list of pairs (I, O), where I specifies maximum input length\n that will be processed in that bucket, and O specifies maximum output\n length. Training instances that have inputs longer than I or outputs\n longer than O will be pushed to the next bucket and padded accordingly.\n We assume that the list is sorted, e.g., [(2, 4), (8, 16)].\n size: number of units in each layer of the model.\n num_layers: number of layers in the model.\n max_gradient_norm: gradients will be clipped to maximally this norm.\n batch_size: the size of the batches used during training;\n the model construction is independent of batch_size, so it can be\n changed after initialization if this is convenient, e.g., for decoding.\n learning_rate: learning rate to start with.\n use_lstm: if true, we use LSTM cells instead of GRU cells.\n num_samples: number of samples for sampled softmax.\n forward_only: if set, we do not construct the backward pass in the model.\n dtype: the data type to use to store internal variables.","docstring_summary":"Create the model.","docstring_tokens":["Create","the","model","."],"function":"def __init__(self,\n source_vocab_size,\n target_vocab_size,\n buckets,\n size,\n num_layers,\n latent_dim,\n max_gradient_norm,\n batch_size,\n learning_rate,\n kl_min=2,\n word_dropout_keep_prob=1.0,\n anneal=False,\n kl_rate_rise_factor=None,\n use_lstm=False,\n num_samples=512,\n optimizer=None,\n activation=tf.nn.relu,\n forward_only=False,\n feed_previous=True,\n bidirectional=False,\n weight_initializer=None,\n bias_initializer=None,\n iaf=False,\n dtype=tf.float32):\n \"\"\"Create the model.\n\n Args:\n source_vocab_size: size of the source vocabulary.\n target_vocab_size: size of the target vocabulary.\n buckets: a list of pairs (I, O), where I specifies maximum input length\n that will be processed in that bucket, and O specifies maximum output\n length. 
Training instances that have inputs longer than I or outputs\n longer than O will be pushed to the next bucket and padded accordingly.\n We assume that the list is sorted, e.g., [(2, 4), (8, 16)].\n size: number of units in each layer of the model.\n num_layers: number of layers in the model.\n max_gradient_norm: gradients will be clipped to maximally this norm.\n batch_size: the size of the batches used during training;\n the model construction is independent of batch_size, so it can be\n changed after initialization if this is convenient, e.g., for decoding.\n learning_rate: learning rate to start with.\n use_lstm: if true, we use LSTM cells instead of GRU cells.\n num_samples: number of samples for sampled softmax.\n forward_only: if set, we do not construct the backward pass in the model.\n dtype: the data type to use to store internal variables.\n \"\"\"\n self.source_vocab_size = source_vocab_size\n self.target_vocab_size = target_vocab_size\n self.latent_dim = latent_dim\n self.buckets = buckets\n self.batch_size = batch_size\n self.word_dropout_keep_prob = word_dropout_keep_prob\n self.kl_min = kl_min\n feed_previous = feed_previous or forward_only\n\n self.learning_rate = tf.Variable(\n float(learning_rate), trainable=False, dtype=dtype)\n\n self.enc_embedding = tf.get_variable(\"enc_embedding\", [source_vocab_size, size], dtype=dtype, initializer=weight_initializer())\n\n self.dec_embedding = tf.get_variable(\"dec_embedding\", [target_vocab_size, size], dtype=dtype, initializer=weight_initializer())\n\n self.kl_rate = tf.Variable(\n 0.0, trainable=False, dtype=dtype)\n self.new_kl_rate = tf.placeholder(tf.float32, shape=[], name=\"new_kl_rate\")\n self.kl_rate_update = tf.assign(self.kl_rate, self.new_kl_rate)\n\n self.replace_input = tf.placeholder(tf.int32, shape=[None], name=\"replace_input\")\n replace_input = tf.nn.embedding_lookup(self.dec_embedding, self.replace_input)\n\n self.global_step = tf.Variable(0, trainable=False)\n\n # If we use sampled softmax, we need an output projection.\n output_projection = None\n softmax_loss_function = None\n # Sampled softmax only makes sense if we sample less than vocabulary size.\n if num_samples > 0 and num_samples < self.target_vocab_size:\n w_t = tf.get_variable(\"proj_w\", [self.target_vocab_size, size], dtype=dtype, initializer=weight_initializer())\n w = tf.transpose(w_t)\n b = tf.get_variable(\"proj_b\", [self.target_vocab_size], dtype=dtype, initializer=bias_initializer)\n output_projection = (w, b)\n\n def sampled_loss(inputs, labels):\n labels = tf.reshape(labels, [-1, 1])\n # We need to compute the sampled_softmax_loss using 32bit floats to\n # avoid numerical instabilities.\n local_w_t = tf.cast(w_t, tf.float32)\n local_b = tf.cast(b, tf.float32)\n local_inputs = tf.cast(inputs, tf.float32)\n return tf.cast(\n tf.nn.sampled_softmax_loss(local_w_t, local_b, local_inputs, labels,\n num_samples, self.target_vocab_size),\n dtype)\n softmax_loss_function = sampled_loss\n # Create the internal multi-layer cell for our RNN.\n single_cell = tf.nn.rnn_cell.GRUCell(size)\n if use_lstm:\n single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)\n\n cell = single_cell\n\n def encoder_f(encoder_inputs):\n return seq2seq.embedding_encoder(\n encoder_inputs,\n cell,\n self.enc_embedding,\n num_symbols=source_vocab_size,\n embedding_size=size,\n bidirectional=bidirectional,\n weight_initializer=weight_initializer,\n dtype=dtype)\n\n def decoder_f(encoder_state, decoder_inputs):\n return seq2seq.embedding_rnn_decoder(\n decoder_inputs,\n 
encoder_state,\n cell,\n embedding=self.dec_embedding,\n word_dropout_keep_prob=word_dropout_keep_prob,\n replace_input=replace_input,\n num_symbols=target_vocab_size,\n embedding_size=size,\n output_projection=output_projection,\n feed_previous=feed_previous,\n weight_initializer=weight_initializer)\n\n def enc_latent_f(encoder_state):\n return seq2seq.encoder_to_latent(\n encoder_state,\n embedding_size=size,\n latent_dim=latent_dim,\n num_layers=num_layers,\n activation=activation,\n use_lstm=use_lstm,\n enc_state_bidirectional=bidirectional,\n dtype=dtype)\n\n def latent_dec_f(latent_vector):\n return seq2seq.latent_to_decoder(latent_vector,\n embedding_size=size,\n latent_dim=latent_dim,\n num_layers=num_layers,\n activation=activation,\n use_lstm=use_lstm,\n dtype=dtype)\n\n\n def sample_f(mean, logvar):\n return seq2seq.sample(\n mean,\n logvar,\n latent_dim,\n iaf,\n kl_min,\n anneal,\n self.kl_rate,\n dtype)\n\n # The seq2seq function: we use embedding for the input and attention.\n def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):\n return tf.nn.seq2seq.embedding_attention_seq2seq_f(\n encoder_inputs,\n decoder_inputs,\n cell,\n num_encoder_symbols=source_vocab_size,\n num_decoder_symbols=target_vocab_size,\n embedding_size=size,\n output_projection=output_projection,\n feed_previous=do_decode,\n dtype=dtype)\n\n\n # Feeds for inputs.\n self.encoder_inputs = []\n self.decoder_inputs = []\n self.target_weights = []\n for i in xrange(buckets[-1][0]): # Last bucket is the biggest one.\n self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],\n name=\"encoder{0}\".format(i)))\n for i in xrange(buckets[-1][1] + 1):\n self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],\n name=\"decoder{0}\".format(i)))\n self.target_weights.append(tf.placeholder(dtype, shape=[None],\n name=\"weight{0}\".format(i)))\n\n # Our targets are decoder inputs shifted by one.\n targets = [self.decoder_inputs[i + 1]\n for i in xrange(len(self.decoder_inputs) - 1)]\n\n\n self.means, self.logvars = seq2seq.variational_encoder_with_buckets(\n self.encoder_inputs, buckets, encoder_f, enc_latent_f,\n softmax_loss_function=softmax_loss_function)\n self.outputs, self.losses, self.KL_objs, self.KL_costs = seq2seq.variational_decoder_with_buckets(\n self.means, self.logvars, self.decoder_inputs, targets,\n self.target_weights, buckets, decoder_f, latent_dec_f,\n sample_f, softmax_loss_function=softmax_loss_function)\n\n # If we use output projection, we need to project outputs for decoding.\n if output_projection is not None:\n for b in xrange(len(buckets)):\n self.outputs[b] = [\n tf.matmul(output, output_projection[0]) + output_projection[1]\n for output in self.outputs[b]\n ]\n # Gradients and SGD update operation for training the model.\n params = tf.trainable_variables()\n if not forward_only:\n self.gradient_norms = []\n self.updates = []\n for b in xrange(len(buckets)):\n total_loss = self.losses[b] + self.KL_objs[b]\n gradients = tf.gradients(total_loss, params)\n clipped_gradients, norm = tf.clip_by_global_norm(gradients,\n max_gradient_norm)\n self.gradient_norms.append(norm)\n self.updates.append(optimizer.apply_gradients(\n zip(clipped_gradients, params), global_step=self.global_step))\n\n self.saver = 
tf.train.Saver(tf.global_variables())","function_tokens":["def","__init__","(","self",",","source_vocab_size",",","target_vocab_size",",","buckets",",","size",",","num_layers",",","latent_dim",",","max_gradient_norm",",","batch_size",",","learning_rate",",","kl_min","=","2",",","word_dropout_keep_prob","=","1.0",",","anneal","=","False",",","kl_rate_rise_factor","=","None",",","use_lstm","=","False",",","num_samples","=","512",",","optimizer","=","None",",","activation","=","tf",".","nn",".","relu",",","forward_only","=","False",",","feed_previous","=","True",",","bidirectional","=","False",",","weight_initializer","=","None",",","bias_initializer","=","None",",","iaf","=","False",",","dtype","=","tf",".","float32",")",":","self",".","source_vocab_size","=","source_vocab_size","self",".","target_vocab_size","=","target_vocab_size","self",".","latent_dim","=","latent_dim","self",".","buckets","=","buckets","self",".","batch_size","=","batch_size","self",".","word_dropout_keep_prob","=","word_dropout_keep_prob","self",".","kl_min","=","kl_min","feed_previous","=","feed_previous","or","forward_only","self",".","learning_rate","=","tf",".","Variable","(","float","(","learning_rate",")",",","trainable","=","False",",","dtype","=","dtype",")","self",".","enc_embedding","=","tf",".","get_variable","(","\"enc_embedding\"",",","[","source_vocab_size",",","size","]",",","dtype","=","dtype",",","initializer","=","weight_initializer","(",")",")","self",".","dec_embedding","=","tf",".","get_variable","(","\"dec_embedding\"",",","[","target_vocab_size",",","size","]",",","dtype","=","dtype",",","initializer","=","weight_initializer","(",")",")","self",".","kl_rate","=","tf",".","Variable","(","0.0",",","trainable","=","False",",","dtype","=","dtype",")","self",".","new_kl_rate","=","tf",".","placeholder","(","tf",".","float32",",","shape","=","[","]",",","name","=","\"new_kl_rate\"",")","self",".","kl_rate_update","=","tf",".","assign","(","self",".","kl_rate",",","self",".","new_kl_rate",")","self",".","replace_input","=","tf",".","placeholder","(","tf",".","int32",",","shape","=","[","None","]",",","name","=","\"replace_input\"",")","replace_input","=","tf",".","nn",".","embedding_lookup","(","self",".","dec_embedding",",","self",".","replace_input",")","self",".","global_step","=","tf",".","Variable","(","0",",","trainable","=","False",")","# If we use sampled softmax, we need an output projection.","output_projection","=","None","softmax_loss_function","=","None","# Sampled softmax only makes sense if we sample less than vocabulary size.","if","num_samples",">","0","and","num_samples","<","self",".","target_vocab_size",":","w_t","=","tf",".","get_variable","(","\"proj_w\"",",","[","self",".","target_vocab_size",",","size","]",",","dtype","=","dtype",",","initializer","=","weight_initializer","(",")",")","w","=","tf",".","transpose","(","w_t",")","b","=","tf",".","get_variable","(","\"proj_b\"",",","[","self",".","target_vocab_size","]",",","dtype","=","dtype",",","initializer","=","bias_initializer",")","output_projection","=","(","w",",","b",")","def","sampled_loss","(","inputs",",","labels",")",":","labels","=","tf",".","reshape","(","labels",",","[","-","1",",","1","]",")","# We need to compute the sampled_softmax_loss using 32bit floats to","# avoid numerical 
instabilities.","local_w_t","=","tf",".","cast","(","w_t",",","tf",".","float32",")","local_b","=","tf",".","cast","(","b",",","tf",".","float32",")","local_inputs","=","tf",".","cast","(","inputs",",","tf",".","float32",")","return","tf",".","cast","(","tf",".","nn",".","sampled_softmax_loss","(","local_w_t",",","local_b",",","local_inputs",",","labels",",","num_samples",",","self",".","target_vocab_size",")",",","dtype",")","softmax_loss_function","=","sampled_loss","# Create the internal multi-layer cell for our RNN.","single_cell","=","tf",".","nn",".","rnn_cell",".","GRUCell","(","size",")","if","use_lstm",":","single_cell","=","tf",".","nn",".","rnn_cell",".","BasicLSTMCell","(","size",")","cell","=","single_cell","def","encoder_f","(","encoder_inputs",")",":","return","seq2seq",".","embedding_encoder","(","encoder_inputs",",","cell",",","self",".","enc_embedding",",","num_symbols","=","source_vocab_size",",","embedding_size","=","size",",","bidirectional","=","bidirectional",",","weight_initializer","=","weight_initializer",",","dtype","=","dtype",")","def","decoder_f","(","encoder_state",",","decoder_inputs",")",":","return","seq2seq",".","embedding_rnn_decoder","(","decoder_inputs",",","encoder_state",",","cell",",","embedding","=","self",".","dec_embedding",",","word_dropout_keep_prob","=","word_dropout_keep_prob",",","replace_input","=","replace_input",",","num_symbols","=","target_vocab_size",",","embedding_size","=","size",",","output_projection","=","output_projection",",","feed_previous","=","feed_previous",",","weight_initializer","=","weight_initializer",")","def","enc_latent_f","(","encoder_state",")",":","return","seq2seq",".","encoder_to_latent","(","encoder_state",",","embedding_size","=","size",",","latent_dim","=","latent_dim",",","num_layers","=","num_layers",",","activation","=","activation",",","use_lstm","=","use_lstm",",","enc_state_bidirectional","=","bidirectional",",","dtype","=","dtype",")","def","latent_dec_f","(","latent_vector",")",":","return","seq2seq",".","latent_to_decoder","(","latent_vector",",","embedding_size","=","size",",","latent_dim","=","latent_dim",",","num_layers","=","num_layers",",","activation","=","activation",",","use_lstm","=","use_lstm",",","dtype","=","dtype",")","def","sample_f","(","mean",",","logvar",")",":","return","seq2seq",".","sample","(","mean",",","logvar",",","latent_dim",",","iaf",",","kl_min",",","anneal",",","self",".","kl_rate",",","dtype",")","# The seq2seq function: we use embedding for the input and attention.","def","seq2seq_f","(","encoder_inputs",",","decoder_inputs",",","do_decode",")",":","return","tf",".","nn",".","seq2seq",".","embedding_attention_seq2seq_f","(","encoder_inputs",",","decoder_inputs",",","cell",",","num_encoder_symbols","=","source_vocab_size",",","num_decoder_symbols","=","target_vocab_size",",","embedding_size","=","size",",","output_projection","=","output_projection",",","feed_previous","=","do_decode",",","dtype","=","dtype",")","# Feeds for inputs.","self",".","encoder_inputs","=","[","]","self",".","decoder_inputs","=","[","]","self",".","target_weights","=","[","]","for","i","in","xrange","(","buckets","[","-","1","]","[","0","]",")",":","# Last bucket is the biggest 
one.","self",".","encoder_inputs",".","append","(","tf",".","placeholder","(","tf",".","int32",",","shape","=","[","None","]",",","name","=","\"encoder{0}\"",".","format","(","i",")",")",")","for","i","in","xrange","(","buckets","[","-","1","]","[","1","]","+","1",")",":","self",".","decoder_inputs",".","append","(","tf",".","placeholder","(","tf",".","int32",",","shape","=","[","None","]",",","name","=","\"decoder{0}\"",".","format","(","i",")",")",")","self",".","target_weights",".","append","(","tf",".","placeholder","(","dtype",",","shape","=","[","None","]",",","name","=","\"weight{0}\"",".","format","(","i",")",")",")","# Our targets are decoder inputs shifted by one.","targets","=","[","self",".","decoder_inputs","[","i","+","1","]","for","i","in","xrange","(","len","(","self",".","decoder_inputs",")","-","1",")","]","self",".","means",",","self",".","logvars","=","seq2seq",".","variational_encoder_with_buckets","(","self",".","encoder_inputs",",","buckets",",","encoder_f",",","enc_latent_f",",","softmax_loss_function","=","softmax_loss_function",")","self",".","outputs",",","self",".","losses",",","self",".","KL_objs",",","self",".","KL_costs","=","seq2seq",".","variational_decoder_with_buckets","(","self",".","means",",","self",".","logvars",",","self",".","decoder_inputs",",","targets",",","self",".","target_weights",",","buckets",",","decoder_f",",","latent_dec_f",",","sample_f",",","softmax_loss_function","=","softmax_loss_function",")","# If we use output projection, we need to project outputs for decoding.","if","output_projection","is","not","None",":","for","b","in","xrange","(","len","(","buckets",")",")",":","self",".","outputs","[","b","]","=","[","tf",".","matmul","(","output",",","output_projection","[","0","]",")","+","output_projection","[","1","]","for","output","in","self",".","outputs","[","b","]","]","# Gradients and SGD update operation for training the model.","params","=","tf",".","trainable_variables","(",")","if","not","forward_only",":","self",".","gradient_norms","=","[","]","self",".","updates","=","[","]","for","b","in","xrange","(","len","(","buckets",")",")",":","total_loss","=","self",".","losses","[","b","]","+","self",".","KL_objs","[","b","]","gradients","=","tf",".","gradients","(","total_loss",",","params",")","clipped_gradients",",","norm","=","tf",".","clip_by_global_norm","(","gradients",",","max_gradient_norm",")","self",".","gradient_norms",".","append","(","norm",")","self",".","updates",".","append","(","optimizer",".","apply_gradients","(","zip","(","clipped_gradients",",","params",")",",","global_step","=","self",".","global_step",")",")","self",".","saver","=","tf",".","train",".","Saver","(","tf",".","global_variables","(",")",")"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq_model.py#L47-L267"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq_model.py","language":"python","identifier":"Seq2SeqModel.step","parameters":"(self, session, encoder_inputs, decoder_inputs, target_weights,\n bucket_id, forward_only, prob, beam_size=1)","argument_list":"","return_statement":"","docstring":"Run a step of the model feeding the given inputs.\n \n Args:\n session: tensorflow session to use.\n encoder_inputs: list of numpy int vectors to feed as encoder inputs.\n decoder_inputs: list of numpy int vectors to feed as decoder inputs.\n target_weights: list of numpy float vectors to feed as target weights.\n bucket_id: which bucket of the model to use.\n forward_only: whether to do the backward step or only forward.\n \n Returns:\n A triple consisting of gradient norm (or None if we did not do backward),\n average perplexity, and the outputs.\n \n Raises:\n ValueError: if length of encoder_inputs, decoder_inputs, or\n target_weights disagrees with bucket size for the specified bucket_id.","docstring_summary":"Run a step of the model feeding the given inputs.\n \n Args:\n session: tensorflow session to use.\n encoder_inputs: list of numpy int vectors to feed as encoder inputs.\n decoder_inputs: list of numpy int vectors to feed as decoder inputs.\n target_weights: list of numpy float vectors to feed as target weights.\n bucket_id: which bucket of the model to use.\n forward_only: whether to do the backward step or only forward.\n \n Returns:\n A triple consisting of gradient norm (or None if we did not do backward),\n average perplexity, and the outputs.\n \n Raises:\n ValueError: if length of encoder_inputs, decoder_inputs, or\n target_weights disagrees with bucket size for the specified bucket_id.","docstring_tokens":["Run","a","step","of","the","model","feeding","the","given","inputs",".","Args",":","session",":","tensorflow","session","to","use",".","encoder_inputs",":","list","of","numpy","int","vectors","to","feed","as","encoder","inputs",".","decoder_inputs",":","list","of","numpy","int","vectors","to","feed","as","decoder","inputs",".","target_weights",":","list","of","numpy","float","vectors","to","feed","as","target","weights",".","bucket_id",":","which","bucket","of","the","model","to","use",".","forward_only",":","whether","to","do","the","backward","step","or","only","forward",".","Returns",":","A","triple","consisting","of","gradient","norm","(","or","None","if","we","did","not","do","backward",")","average","perplexity","and","the","outputs",".","Raises",":","ValueError",":","if","length","of","encoder_inputs","decoder_inputs","or","target_weights","disagrees","with","bucket","size","for","the","specified","bucket_id","."],"function":"def step(self, session, encoder_inputs, decoder_inputs, target_weights,\n bucket_id, forward_only, prob, beam_size=1):\n \"\"\"Run a step of the model feeding the given inputs.\n \n Args:\n session: tensorflow session to use.\n encoder_inputs: list of numpy int vectors to feed as encoder inputs.\n decoder_inputs: list of numpy int vectors to feed as decoder inputs.\n target_weights: list of numpy float vectors to feed as target weights.\n bucket_id: which bucket of the model to use.\n forward_only: whether to do the backward step or only forward.\n \n Returns:\n A triple consisting of gradient norm (or None if we did not do backward),\n average perplexity, and the outputs.\n \n Raises:\n ValueError: if length of encoder_inputs, decoder_inputs, or\n target_weights disagrees 
with bucket size for the specified bucket_id.\n \"\"\"\n # Check if the sizes match.\n encoder_size, decoder_size = self.buckets[bucket_id]\n if len(encoder_inputs) != encoder_size:\n raise ValueError(\"Encoder length must be equal to the one in bucket,\"\n \" %d != %d.\" % (len(encoder_inputs), encoder_size))\n if len(decoder_inputs) != decoder_size:\n raise ValueError(\"Decoder length must be equal to the one in bucket,\"\n \" %d != %d.\" % (len(decoder_inputs), decoder_size))\n if len(target_weights) != decoder_size:\n raise ValueError(\"Weights length must be equal to the one in bucket,\"\n \" %d != %d.\" % (len(target_weights), decoder_size))\n \n # Input feed: encoder inputs, decoder inputs, target_weights, as provided.\n input_feed = {}\n for l in xrange(encoder_size):\n input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]\n for l in xrange(decoder_size):\n input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]\n input_feed[self.target_weights[l].name] = target_weights[l]\n if self.word_dropout_keep_prob < 1:\n input_feed[self.replace_input.name] = np.full((self.batch_size), data_utils.UNK_ID, dtype=np.int32)\n \n # Since our targets are decoder inputs shifted by one, we need one more.\n last_target = self.decoder_inputs[decoder_size].name\n input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)\n if not prob:\n input_feed[self.logvars[bucket_id]] = np.full((self.batch_size, self.latent_dim), -800.0, dtype=np.float32)\n \n # Output feed: depends on whether we do a backward step or not.\n if not forward_only:\n output_feed = [self.updates[bucket_id], # Update Op that does SGD.\n self.gradient_norms[bucket_id], # Gradient norm.\n self.losses[bucket_id],\n self.KL_costs[bucket_id]] # Loss for this batch.\n else:\n output_feed = [self.losses[bucket_id], self.KL_costs[bucket_id]] # Loss for this batch.\n for l in xrange(decoder_size): # Output logits.\n output_feed.append(self.outputs[bucket_id][l])\n \n outputs = session.run(output_feed, input_feed)\n if not forward_only:\n return outputs[1], outputs[2], outputs[3], None # Gradient norm, loss, KL divergence, no outputs.\n else:\n return None, outputs[0], outputs[1], outputs[2:]","function_tokens":["def","step","(","self",",","session",",","encoder_inputs",",","decoder_inputs",",","target_weights",",","bucket_id",",","forward_only",",","prob",",","beam_size","=","1",")",":","# Check if the sizes match.","encoder_size",",","decoder_size","=","self",".","buckets","[","bucket_id","]","if","len","(","encoder_inputs",")","!=","encoder_size",":","raise","ValueError","(","\"Encoder length must be equal to the one in bucket,\"","\" %d != %d.\"","%","(","len","(","encoder_inputs",")",",","encoder_size",")",")","if","len","(","decoder_inputs",")","!=","decoder_size",":","raise","ValueError","(","\"Decoder length must be equal to the one in bucket,\"","\" %d != %d.\"","%","(","len","(","decoder_inputs",")",",","decoder_size",")",")","if","len","(","target_weights",")","!=","decoder_size",":","raise","ValueError","(","\"Weights length must be equal to the one in bucket,\"","\" %d != %d.\"","%","(","len","(","target_weights",")",",","decoder_size",")",")","# Input feed: encoder inputs, decoder inputs, target_weights, as 
provided.","input_feed","=","{","}","for","l","in","xrange","(","encoder_size",")",":","input_feed","[","self",".","encoder_inputs","[","l","]",".","name","]","=","encoder_inputs","[","l","]","for","l","in","xrange","(","decoder_size",")",":","input_feed","[","self",".","decoder_inputs","[","l","]",".","name","]","=","decoder_inputs","[","l","]","input_feed","[","self",".","target_weights","[","l","]",".","name","]","=","target_weights","[","l","]","if","self",".","word_dropout_keep_prob","<","1",":","input_feed","[","self",".","replace_input",".","name","]","=","np",".","full","(","(","self",".","batch_size",")",",","data_utils",".","UNK_ID",",","dtype","=","np",".","int32",")","# Since our targets are decoder inputs shifted by one, we need one more.","last_target","=","self",".","decoder_inputs","[","decoder_size","]",".","name","input_feed","[","last_target","]","=","np",".","zeros","(","[","self",".","batch_size","]",",","dtype","=","np",".","int32",")","if","not","prob",":","input_feed","[","self",".","logvars","[","bucket_id","]","]","=","np",".","full","(","(","self",".","batch_size",",","self",".","latent_dim",")",",","-","800.0",",","dtype","=","np",".","float32",")","# Output feed: depends on whether we do a backward step or not.","if","not","forward_only",":","output_feed","=","[","self",".","updates","[","bucket_id","]",",","# Update Op that does SGD.","self",".","gradient_norms","[","bucket_id","]",",","# Gradient norm.","self",".","losses","[","bucket_id","]",",","self",".","KL_costs","[","bucket_id","]","]","# Loss for this batch.","else",":","output_feed","=","[","self",".","losses","[","bucket_id","]",",","self",".","KL_costs","[","bucket_id","]","]","# Loss for this batch.","for","l","in","xrange","(","decoder_size",")",":","# Output logits.","output_feed",".","append","(","self",".","outputs","[","bucket_id","]","[","l","]",")","outputs","=","session",".","run","(","output_feed",",","input_feed",")","if","not","forward_only",":","return","outputs","[","1","]",",","outputs","[","2","]",",","outputs","[","3","]",",","None","# Gradient norm, loss, KL divergence, no outputs.","else",":","return","None",",","outputs","[","0","]",",","outputs","[","1","]",",","outputs","[","2",":","]"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq_model.py#L270-L333"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq_model.py","language":"python","identifier":"Seq2SeqModel.get_batch","parameters":"(self, data, bucket_id)","argument_list":"","return_statement":"return batch_encoder_inputs, batch_decoder_inputs, batch_weights","docstring":"Get a random batch of data from the specified bucket, prepare for step.\n\n To feed data in step(..) it must be a list of batch-major vectors, while\n data here contains single length-major cases. So the main logic of this\n function is to re-index data cases to be in the proper format for feeding.\n\n Args:\n data: a tuple of size len(self.buckets) in which each element contains\n lists of pairs of input and output data that we use to create a batch.\n bucket_id: integer, which bucket to get the batch for.\n\n Returns:\n The triple (encoder_inputs, decoder_inputs, target_weights) for\n the constructed batch that has the proper format to call step(...) later.","docstring_summary":"Get a random batch of data from the specified bucket, prepare for step.","docstring_tokens":["Get","a","random","batch","of","data","from","the","specified","bucket","prepare","for","step","."],"function":"def get_batch(self, data, bucket_id):\n \"\"\"Get a random batch of data from the specified bucket, prepare for step.\n\n To feed data in step(..) it must be a list of batch-major vectors, while\n data here contains single length-major cases. So the main logic of this\n function is to re-index data cases to be in the proper format for feeding.\n\n Args:\n data: a tuple of size len(self.buckets) in which each element contains\n lists of pairs of input and output data that we use to create a batch.\n bucket_id: integer, which bucket to get the batch for.\n\n Returns:\n The triple (encoder_inputs, decoder_inputs, target_weights) for\n the constructed batch that has the proper format to call step(...) 
later.\n \"\"\"\n encoder_size, decoder_size = self.buckets[bucket_id]\n encoder_inputs, decoder_inputs = [], []\n\n # Get a random batch of encoder and decoder inputs from data,\n # pad them if needed, reverse encoder inputs and add GO to decoder.\n for _ in xrange(self.batch_size):\n encoder_input, decoder_input = random.choice(data[bucket_id])\n\n # Encoder inputs are padded and then reversed.\n encoder_pad = [data_utils.PAD_ID] * (encoder_size - len(encoder_input))\n encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))\n\n # Decoder inputs get an extra \"GO\" symbol, and are padded then.\n decoder_pad_size = decoder_size - len(decoder_input) - 1\n decoder_inputs.append([data_utils.GO_ID] + decoder_input +\n [data_utils.PAD_ID] * decoder_pad_size)\n\n # Now we create batch-major vectors from the data selected above.\n batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []\n\n # Batch encoder inputs are just re-indexed encoder_inputs.\n for length_idx in xrange(encoder_size):\n batch_encoder_inputs.append(\n np.array([encoder_inputs[batch_idx][length_idx]\n for batch_idx in xrange(self.batch_size)], dtype=np.int32))\n\n # Batch decoder inputs are re-indexed decoder_inputs, we create weights.\n for length_idx in xrange(decoder_size):\n batch_decoder_inputs.append(\n np.array([decoder_inputs[batch_idx][length_idx]\n for batch_idx in xrange(self.batch_size)], dtype=np.int32))\n\n # Create target_weights to be 0 for targets that are padding.\n batch_weight = np.ones(self.batch_size, dtype=np.float32)\n for batch_idx in xrange(self.batch_size):\n # We set weight to 0 if the corresponding target is a PAD symbol.\n # The corresponding target is decoder_input shifted by 1 forward.\n if length_idx < decoder_size - 1:\n target = decoder_inputs[batch_idx][length_idx + 1]\n if length_idx == decoder_size - 1 or target == data_utils.PAD_ID:\n batch_weight[batch_idx] = 0.0\n batch_weights.append(batch_weight)\n return batch_encoder_inputs, batch_decoder_inputs, batch_weights","function_tokens":["def","get_batch","(","self",",","data",",","bucket_id",")",":","encoder_size",",","decoder_size","=","self",".","buckets","[","bucket_id","]","encoder_inputs",",","decoder_inputs","=","[","]",",","[","]","# Get a random batch of encoder and decoder inputs from data,","# pad them if needed, reverse encoder inputs and add GO to decoder.","for","_","in","xrange","(","self",".","batch_size",")",":","encoder_input",",","decoder_input","=","random",".","choice","(","data","[","bucket_id","]",")","# Encoder inputs are padded and then reversed.","encoder_pad","=","[","data_utils",".","PAD_ID","]","*","(","encoder_size","-","len","(","encoder_input",")",")","encoder_inputs",".","append","(","list","(","reversed","(","encoder_input","+","encoder_pad",")",")",")","# Decoder inputs get an extra \"GO\" symbol, and are padded then.","decoder_pad_size","=","decoder_size","-","len","(","decoder_input",")","-","1","decoder_inputs",".","append","(","[","data_utils",".","GO_ID","]","+","decoder_input","+","[","data_utils",".","PAD_ID","]","*","decoder_pad_size",")","# Now we create batch-major vectors from the data selected above.","batch_encoder_inputs",",","batch_decoder_inputs",",","batch_weights","=","[","]",",","[","]",",","[","]","# Batch encoder inputs are just re-indexed 
encoder_inputs.","for","length_idx","in","xrange","(","encoder_size",")",":","batch_encoder_inputs",".","append","(","np",".","array","(","[","encoder_inputs","[","batch_idx","]","[","length_idx","]","for","batch_idx","in","xrange","(","self",".","batch_size",")","]",",","dtype","=","np",".","int32",")",")","# Batch decoder inputs are re-indexed decoder_inputs, we create weights.","for","length_idx","in","xrange","(","decoder_size",")",":","batch_decoder_inputs",".","append","(","np",".","array","(","[","decoder_inputs","[","batch_idx","]","[","length_idx","]","for","batch_idx","in","xrange","(","self",".","batch_size",")","]",",","dtype","=","np",".","int32",")",")","# Create target_weights to be 0 for targets that are padding.","batch_weight","=","np",".","ones","(","self",".","batch_size",",","dtype","=","np",".","float32",")","for","batch_idx","in","xrange","(","self",".","batch_size",")",":","# We set weight to 0 if the corresponding target is a PAD symbol.","# The corresponding target is decoder_input shifted by 1 forward.","if","length_idx","<","decoder_size","-","1",":","target","=","decoder_inputs","[","batch_idx","]","[","length_idx","+","1","]","if","length_idx","==","decoder_size","-","1","or","target","==","data_utils",".","PAD_ID",":","batch_weight","[","batch_idx","]","=","0.0","batch_weights",".","append","(","batch_weight",")","return","batch_encoder_inputs",",","batch_decoder_inputs",",","batch_weights"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq_model.py#L378-L436"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"vrae.py","language":"python","identifier":"read_data","parameters":"(source_path, target_path, config, max_size=None)","argument_list":"","return_statement":"return data_set","docstring":"Read data from source and target files and put into buckets.\n\n Args:\n source_path: path to the files with token-ids for the source language.\n target_path: path to the file with token-ids for the target language;\n it must be aligned with the source file: n-th line contains the desired\n output for n-th line from the source_path.\n max_size: maximum number of lines to read, all other will be ignored;\n if 0 or None, data files will be read completely (no limit).\n\n Returns:\n data_set: a list of length len(config.buckets); data_set[n] contains a list of\n (source, target) pairs read from the provided data files that fit\n into the n-th bucket, i.e., such that len(source) < config.buckets[n][0] and\n len(target) < config.buckets[n][1]; source and target are lists of token-ids.","docstring_summary":"Read data from source and target files and put into buckets.","docstring_tokens":["Read","data","from","source","and","target","files","and","put","into","buckets","."],"function":"def read_data(source_path, target_path, config, max_size=None):\n \"\"\"Read data from source and target files and put into buckets.\n\n Args:\n source_path: path to the files with token-ids for the source language.\n target_path: path to the file with token-ids for the target language;\n it must be aligned with the source file: n-th line contains the desired\n output for n-th line from the source_path.\n max_size: maximum number of lines to read, all other will be ignored;\n if 0 or None, data files will be read completely (no limit).\n\n Returns:\n data_set: a list of length len(config.buckets); data_set[n] contains a list of\n (source, target) pairs read from the provided data files that fit\n into the n-th bucket, i.e., such that len(source) < config.buckets[n][0] and\n len(target) < config.buckets[n][1]; source and target are lists of token-ids.\n \"\"\"\n data_set = [[] for _ in config.buckets]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n counter = 0\n while source and target and (not max_size or counter < max_size):\n counter += 1\n if counter % 100000 == 0:\n print(\" reading data line %d\" % counter)\n sys.stdout.flush()\n source_ids = [int(x) for x in source.split()]\n target_ids = [int(x) for x in target.split()]\n target_ids.append(data_utils.EOS_ID)\n for bucket_id, (source_size, target_size) in enumerate(config.buckets):\n if len(source_ids) < source_size and len(target_ids) < target_size:\n data_set[bucket_id].append([source_ids, target_ids])\n break\n source, target = source_file.readline(), target_file.readline()\n return 
data_set","function_tokens":["def","read_data","(","source_path",",","target_path",",","config",",","max_size","=","None",")",":","data_set","=","[","[","]","for","_","in","config",".","buckets","]","with","tf",".","gfile",".","GFile","(","source_path",",","mode","=","\"r\"",")","as","source_file",":","with","tf",".","gfile",".","GFile","(","target_path",",","mode","=","\"r\"",")","as","target_file",":","source",",","target","=","source_file",".","readline","(",")",",","target_file",".","readline","(",")","counter","=","0","while","source","and","target","and","(","not","max_size","or","counter","<","max_size",")",":","counter","+=","1","if","counter","%","100000","==","0",":","print","(","\" reading data line %d\"","%","counter",")","sys",".","stdout",".","flush","(",")","source_ids","=","[","int","(","x",")","for","x","in","source",".","split","(",")","]","target_ids","=","[","int","(","x",")","for","x","in","target",".","split","(",")","]","target_ids",".","append","(","data_utils",".","EOS_ID",")","for","bucket_id",",","(","source_size",",","target_size",")","in","enumerate","(","config",".","buckets",")",":","if","len","(","source_ids",")","<","source_size","and","len","(","target_ids",")","<","target_size",":","data_set","[","bucket_id","]",".","append","(","[","source_ids",",","target_ids","]",")","break","source",",","target","=","source_file",".","readline","(",")",",","target_file",".","readline","(",")","return","data_set"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/vrae.py#L67-L102"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"vrae.py","language":"python","identifier":"create_model","parameters":"(session, config, forward_only)","argument_list":"","return_statement":"return model","docstring":"Create translation model and initialize or load parameters in session.","docstring_summary":"Create translation model and initialize or load parameters in session.","docstring_tokens":["Create","translation","model","and","initialize","or","load","parameters","in","session","."],"function":"def create_model(session, config, forward_only):\n \"\"\"Create translation model and initialize or load parameters in session.\"\"\"\n dtype = tf.float32\n optimizer = None\n if not forward_only:\n optimizer = tf.train.AdamOptimizer(config.learning_rate)\n if config.activation == \"elu\":\n activation = tf.nn.elu\n elif config.activation == \"prelu\":\n activation = prelu\n else:\n activation = tf.identity\n\n weight_initializer = tf.orthogonal_initializer if config.orthogonal_initializer else tf.uniform_unit_scaling_initializer\n bias_initializer = tf.zeros_initializer\n\n model = seq2seq_model.Seq2SeqModel(\n config.en_vocab_size,\n config.fr_vocab_size,\n config.buckets,\n config.size,\n config.num_layers,\n config.latent_dim,\n config.max_gradient_norm,\n config.batch_size,\n config.learning_rate,\n config.kl_min,\n config.word_dropout_keep_prob,\n config.anneal,\n config.use_lstm,\n optimizer=optimizer,\n activation=activation,\n forward_only=forward_only,\n feed_previous=config.feed_previous,\n bidirectional=config.bidirectional,\n weight_initializer=weight_initializer,\n bias_initializer=bias_initializer,\n iaf=config.iaf,\n dtype=dtype)\n ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)\n if not FLAGS.new and ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\n print(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n print(\"Created model with fresh parameters.\")\n session.run(tf.global_variables_initializer())\n return 
model","function_tokens":["def","create_model","(","session",",","config",",","forward_only",")",":","dtype","=","tf",".","float32","optimizer","=","None","if","not","forward_only",":","optimizer","=","tf",".","train",".","AdamOptimizer","(","config",".","learning_rate",")","if","config",".","activation","==","\"elu\"",":","activation","=","tf",".","nn",".","elu","elif","config",".","activation","==","\"prelu\"",":","activation","=","prelu","else",":","activation","=","tf",".","identity","weight_initializer","=","tf",".","orthogonal_initializer","if","config",".","orthogonal_initializer","else","tf",".","uniform_unit_scaling_initializer","bias_initializer","=","tf",".","zeros_initializer","model","=","seq2seq_model",".","Seq2SeqModel","(","config",".","en_vocab_size",",","config",".","fr_vocab_size",",","config",".","buckets",",","config",".","size",",","config",".","num_layers",",","config",".","latent_dim",",","config",".","max_gradient_norm",",","config",".","batch_size",",","config",".","learning_rate",",","config",".","kl_min",",","config",".","word_dropout_keep_prob",",","config",".","anneal",",","config",".","use_lstm",",","optimizer","=","optimizer",",","activation","=","activation",",","forward_only","=","forward_only",",","feed_previous","=","config",".","feed_previous",",","bidirectional","=","config",".","bidirectional",",","weight_initializer","=","weight_initializer",",","bias_initializer","=","bias_initializer",",","iaf","=","config",".","iaf",",","dtype","=","dtype",")","ckpt","=","tf",".","train",".","get_checkpoint_state","(","FLAGS",".","model_dir",")","if","not","FLAGS",".","new","and","ckpt","and","tf",".","train",".","checkpoint_exists","(","ckpt",".","model_checkpoint_path",")",":","print","(","\"Reading model parameters from %s\"","%","ckpt",".","model_checkpoint_path",")","model",".","saver",".","restore","(","session",",","ckpt",".","model_checkpoint_path",")","else",":","print","(","\"Created model with fresh parameters.\"",")","session",".","run","(","tf",".","global_variables_initializer","(",")",")","return","model"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/vrae.py#L105-L151"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"vrae.py","language":"python","identifier":"train","parameters":"(config)","argument_list":"","return_statement":"","docstring":"Train a en->fr translation model using WMT data.","docstring_summary":"Train a en->fr translation model using WMT data.","docstring_tokens":["Train","a","en","-",">","fr","translation","model","using","WMT","data","."],"function":"def train(config):\n \"\"\"Train a en->fr translation model using WMT data.\"\"\"\n # Prepare WMT data.\n print(\"Preparing WMT data in %s\" % config.data_dir)\n en_train, fr_train, en_dev, fr_dev, _, _ = data_utils.prepare_wmt_data(\n config.data_dir, config.en_vocab_size, config.fr_vocab_size, config.load_embeddings)\n\n with tf.Session() as sess:\n if not os.path.exists(FLAGS.model_dir):\n os.makedirs(FLAGS.model_dir)\n\n # Create model.\n print(\"Creating %d layers of %d units.\" % (config.num_layers, config.size))\n model = create_model(sess, config, False)\n\n if not config.probabilistic:\n self.kl_rate_update(0.0)\n\n train_writer = tf.summary.FileWriter(os.path.join(FLAGS.model_dir,\"train\"), graph=sess.graph)\n dev_writer = tf.summary.FileWriter(os.path.join(FLAGS.model_dir, \"test\"), graph=sess.graph)\n\n # Read data into buckets and compute their sizes.\n print (\"Reading development and training data (limit: %d).\"\n % config.max_train_data_size)\n\n dev_set = read_data(en_dev, fr_dev, config)\n train_set = read_data(en_train, fr_train, config, config.max_train_data_size)\n train_bucket_sizes = [len(train_set[b]) for b in xrange(len(config.buckets))]\n train_total_size = float(sum(train_bucket_sizes))\n\n # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use\n # to select a bucket. Length of [scale[i], scale[i+1]] is proportional to\n # the size if i-th training bucket, as used later.\n train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) \/ train_total_size\n for i in xrange(len(train_bucket_sizes))]\n\n # This is the training loop.\n step_time, loss = 0.0, 0.0\n KL_loss = 0.0\n current_step = model.global_step.eval()\n step_loss_summaries = []\n step_KL_loss_summaries = []\n overall_start_time = time.time()\n while True:\n # Choose a bucket according to data distribution. 
We pick a random number\n # in [0, 1] and use the corresponding interval in train_buckets_scale.\n random_number_01 = np.random.random_sample()\n bucket_id = min([i for i in xrange(len(train_buckets_scale))\n if train_buckets_scale[i] > random_number_01])\n\n # Get a batch and make a step.\n start_time = time.time()\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n train_set, bucket_id)\n _, step_loss, step_KL_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, bucket_id, False, config.probabilistic)\n\n if config.anneal and model.global_step.eval() > config.kl_rate_rise_time and model.kl_rate < 1:\n new_kl_rate = model.kl_rate.eval() + config.kl_rate_rise_factor\n sess.run(model.kl_rate_update, feed_dict={'new_kl_rate': new_kl_rate})\n\n step_time += (time.time() - start_time) \/ config.steps_per_checkpoint\n step_loss_summaries.append(tf.Summary(value=[tf.Summary.Value(tag=\"step loss\", simple_value=float(step_loss))]))\n step_KL_loss_summaries.append(tf.Summary(value=[tf.Summary.Value(tag=\"KL step loss\", simple_value=float(step_KL_loss))]))\n loss += step_loss \/ config.steps_per_checkpoint\n KL_loss += step_KL_loss \/ config.steps_per_checkpoint\n current_step = model.global_step.eval()\n\n # Once in a while, we save checkpoint, print statistics, and run evals.\n if current_step % config.steps_per_checkpoint == 0:\n # Print statistics for the previous epoch.\n perplexity = math.exp(float(loss)) if loss < 300 else float(\"inf\")\n print (\"global step %d learning rate %.4f step-time %.2f perplexity \"\n \"%.2f\" % (model.global_step.eval(), model.learning_rate.eval(),\n step_time, perplexity))\n\n print (\"global step %d learning rate %.4f step-time %.2f KL divergence \"\n \"%.2f\" % (model.global_step.eval(), model.learning_rate.eval(),\n step_time, KL_loss))\n wall_time = time.time() - overall_start_time\n print(\"time passed: {0}\".format(wall_time))\n\n # Add perplexity, KL divergence to summary and stats.\n perp_summary = tf.Summary(value=[tf.Summary.Value(tag=\"train perplexity\", simple_value=perplexity)])\n train_writer.add_summary(perp_summary, current_step)\n KL_loss_summary = tf.Summary(value=[tf.Summary.Value(tag=\"KL divergence\", simple_value=KL_loss)])\n train_writer.add_summary(KL_loss_summary, current_step)\n for i, summary in enumerate(step_loss_summaries):\n train_writer.add_summary(summary, current_step - 200 + i)\n step_loss_summaries = []\n for i, summary in enumerate(step_KL_loss_summaries):\n train_writer.add_summary(summary, current_step - 200 + i)\n step_KL_loss_summaries = []\n\n # Save checkpoint and zero timer and loss.\n checkpoint_path = os.path.join(FLAGS.model_dir, FLAGS.model_name + \".ckpt\")\n model.saver.save(sess, checkpoint_path, global_step=model.global_step)\n step_time, loss, KL_loss = 0.0, 0.0, 0.0\n\n # Run evals on development set and print their perplexity.\n eval_losses = []\n eval_KL_losses = []\n eval_bucket_num = 0\n for bucket_id in xrange(len(config.buckets)):\n if len(dev_set[bucket_id]) == 0:\n print(\" eval: empty bucket %d\" % (bucket_id))\n continue\n eval_bucket_num += 1\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n dev_set, bucket_id)\n _, eval_loss, eval_KL_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,\n target_weights, bucket_id, True, config.probabilistic)\n eval_losses.append(float(eval_loss))\n eval_KL_losses.append(float(eval_KL_loss))\n eval_ppx = math.exp(float(eval_loss)) if eval_loss < 300 else float(\n \"inf\")\n print(\" eval: bucket 
%d perplexity %.2f\" % (bucket_id, eval_ppx))\n\n eval_perp_summary = tf.Summary(value=[tf.Summary.Value(tag=\"eval perplexity for bucket {0}\".format(bucket_id), simple_value=eval_ppx)])\n dev_writer.add_summary(eval_perp_summary, current_step)\n\n mean_eval_loss = sum(eval_losses) \/ float(eval_bucket_num)\n mean_eval_KL_loss = sum(eval_KL_losses) \/ float(eval_bucket_num)\n mean_eval_ppx = math.exp(float(mean_eval_loss))\n print(\" eval: mean perplexity {0}\".format(mean_eval_ppx))\n\n eval_loss_summary = tf.Summary(value=[tf.Summary.Value(tag=\"mean eval loss\", simple_value=float(mean_eval_ppx))])\n dev_writer.add_summary(eval_loss_summary, current_step)\n eval_KL_loss_summary = tf.Summary(value=[tf.Summary.Value(tag=\"mean eval loss\", simple_value=float(mean_eval_KL_loss))])\n dev_writer.add_summary(eval_KL_loss_summary, current_step)","function_tokens":["def","train","(","config",")",":","# Prepare WMT data.","print","(","\"Preparing WMT data in %s\"","%","config",".","data_dir",")","en_train",",","fr_train",",","en_dev",",","fr_dev",",","_",",","_","=","data_utils",".","prepare_wmt_data","(","config",".","data_dir",",","config",".","en_vocab_size",",","config",".","fr_vocab_size",",","config",".","load_embeddings",")","with","tf",".","Session","(",")","as","sess",":","if","not","os",".","path",".","exists","(","FLAGS",".","model_dir",")",":","os",".","makedirs","(","FLAGS",".","model_dir",")","# Create model.","print","(","\"Creating %d layers of %d units.\"","%","(","config",".","num_layers",",","config",".","size",")",")","model","=","create_model","(","sess",",","config",",","False",")","if","not","config",".","probabilistic",":","self",".","kl_rate_update","(","0.0",")","train_writer","=","tf",".","summary",".","FileWriter","(","os",".","path",".","join","(","FLAGS",".","model_dir",",","\"train\"",")",",","graph","=","sess",".","graph",")","dev_writer","=","tf",".","summary",".","FileWriter","(","os",".","path",".","join","(","FLAGS",".","model_dir",",","\"test\"",")",",","graph","=","sess",".","graph",")","# Read data into buckets and compute their sizes.","print","(","\"Reading development and training data (limit: %d).\"","%","config",".","max_train_data_size",")","dev_set","=","read_data","(","en_dev",",","fr_dev",",","config",")","train_set","=","read_data","(","en_train",",","fr_train",",","config",",","config",".","max_train_data_size",")","train_bucket_sizes","=","[","len","(","train_set","[","b","]",")","for","b","in","xrange","(","len","(","config",".","buckets",")",")","]","train_total_size","=","float","(","sum","(","train_bucket_sizes",")",")","# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use","# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to","# the size if i-th training bucket, as used later.","train_buckets_scale","=","[","sum","(","train_bucket_sizes","[",":","i","+","1","]",")","\/","train_total_size","for","i","in","xrange","(","len","(","train_bucket_sizes",")",")","]","# This is the training loop.","step_time",",","loss","=","0.0",",","0.0","KL_loss","=","0.0","current_step","=","model",".","global_step",".","eval","(",")","step_loss_summaries","=","[","]","step_KL_loss_summaries","=","[","]","overall_start_time","=","time",".","time","(",")","while","True",":","# Choose a bucket according to data distribution. 
We pick a random number","# in [0, 1] and use the corresponding interval in train_buckets_scale.","random_number_01","=","np",".","random",".","random_sample","(",")","bucket_id","=","min","(","[","i","for","i","in","xrange","(","len","(","train_buckets_scale",")",")","if","train_buckets_scale","[","i","]",">","random_number_01","]",")","# Get a batch and make a step.","start_time","=","time",".","time","(",")","encoder_inputs",",","decoder_inputs",",","target_weights","=","model",".","get_batch","(","train_set",",","bucket_id",")","_",",","step_loss",",","step_KL_loss",",","_","=","model",".","step","(","sess",",","encoder_inputs",",","decoder_inputs",",","target_weights",",","bucket_id",",","False",",","config",".","probabilistic",")","if","config",".","anneal","and","model",".","global_step",".","eval","(",")",">","config",".","kl_rate_rise_time","and","model",".","kl_rate","<","1",":","new_kl_rate","=","model",".","kl_rate",".","eval","(",")","+","config",".","kl_rate_rise_factor","sess",".","run","(","model",".","kl_rate_update",",","feed_dict","=","{","'new_kl_rate'",":","new_kl_rate","}",")","step_time","+=","(","time",".","time","(",")","-","start_time",")","\/","config",".","steps_per_checkpoint","step_loss_summaries",".","append","(","tf",".","Summary","(","value","=","[","tf",".","Summary",".","Value","(","tag","=","\"step loss\"",",","simple_value","=","float","(","step_loss",")",")","]",")",")","step_KL_loss_summaries",".","append","(","tf",".","Summary","(","value","=","[","tf",".","Summary",".","Value","(","tag","=","\"KL step loss\"",",","simple_value","=","float","(","step_KL_loss",")",")","]",")",")","loss","+=","step_loss","\/","config",".","steps_per_checkpoint","KL_loss","+=","step_KL_loss","\/","config",".","steps_per_checkpoint","current_step","=","model",".","global_step",".","eval","(",")","# Once in a while, we save checkpoint, print statistics, and run evals.","if","current_step","%","config",".","steps_per_checkpoint","==","0",":","# Print statistics for the previous epoch.","perplexity","=","math",".","exp","(","float","(","loss",")",")","if","loss","<","300","else","float","(","\"inf\"",")","print","(","\"global step %d learning rate %.4f step-time %.2f perplexity \"","\"%.2f\"","%","(","model",".","global_step",".","eval","(",")",",","model",".","learning_rate",".","eval","(",")",",","step_time",",","perplexity",")",")","print","(","\"global step %d learning rate %.4f step-time %.2f KL divergence \"","\"%.2f\"","%","(","model",".","global_step",".","eval","(",")",",","model",".","learning_rate",".","eval","(",")",",","step_time",",","KL_loss",")",")","wall_time","=","time",".","time","(",")","-","overall_start_time","print","(","\"time passed: {0}\"",".","format","(","wall_time",")",")","# Add perplexity, KL divergence to summary and stats.","perp_summary","=","tf",".","Summary","(","value","=","[","tf",".","Summary",".","Value","(","tag","=","\"train perplexity\"",",","simple_value","=","perplexity",")","]",")","train_writer",".","add_summary","(","perp_summary",",","current_step",")","KL_loss_summary","=","tf",".","Summary","(","value","=","[","tf",".","Summary",".","Value","(","tag","=","\"KL 
divergence\"",",","simple_value","=","KL_loss",")","]",")","train_writer",".","add_summary","(","KL_loss_summary",",","current_step",")","for","i",",","summary","in","enumerate","(","step_loss_summaries",")",":","train_writer",".","add_summary","(","summary",",","current_step","-","200","+","i",")","step_loss_summaries","=","[","]","for","i",",","summary","in","enumerate","(","step_KL_loss_summaries",")",":","train_writer",".","add_summary","(","summary",",","current_step","-","200","+","i",")","step_KL_loss_summaries","=","[","]","# Save checkpoint and zero timer and loss.","checkpoint_path","=","os",".","path",".","join","(","FLAGS",".","model_dir",",","FLAGS",".","model_name","+","\".ckpt\"",")","model",".","saver",".","save","(","sess",",","checkpoint_path",",","global_step","=","model",".","global_step",")","step_time",",","loss",",","KL_loss","=","0.0",",","0.0",",","0.0","# Run evals on development set and print their perplexity.","eval_losses","=","[","]","eval_KL_losses","=","[","]","eval_bucket_num","=","0","for","bucket_id","in","xrange","(","len","(","config",".","buckets",")",")",":","if","len","(","dev_set","[","bucket_id","]",")","==","0",":","print","(","\" eval: empty bucket %d\"","%","(","bucket_id",")",")","continue","eval_bucket_num","+=","1","encoder_inputs",",","decoder_inputs",",","target_weights","=","model",".","get_batch","(","dev_set",",","bucket_id",")","_",",","eval_loss",",","eval_KL_loss",",","_","=","model",".","step","(","sess",",","encoder_inputs",",","decoder_inputs",",","target_weights",",","bucket_id",",","True",",","config",".","probabilistic",")","eval_losses",".","append","(","float","(","eval_loss",")",")","eval_KL_losses",".","append","(","float","(","eval_KL_loss",")",")","eval_ppx","=","math",".","exp","(","float","(","eval_loss",")",")","if","eval_loss","<","300","else","float","(","\"inf\"",")","print","(","\" eval: bucket %d perplexity %.2f\"","%","(","bucket_id",",","eval_ppx",")",")","eval_perp_summary","=","tf",".","Summary","(","value","=","[","tf",".","Summary",".","Value","(","tag","=","\"eval perplexity for bucket {0}\"",".","format","(","bucket_id",")",",","simple_value","=","eval_ppx",")","]",")","dev_writer",".","add_summary","(","eval_perp_summary",",","current_step",")","mean_eval_loss","=","sum","(","eval_losses",")","\/","float","(","eval_bucket_num",")","mean_eval_KL_loss","=","sum","(","eval_KL_losses",")","\/","float","(","eval_bucket_num",")","mean_eval_ppx","=","math",".","exp","(","float","(","mean_eval_loss",")",")","print","(","\" eval: mean perplexity {0}\"",".","format","(","mean_eval_ppx",")",")","eval_loss_summary","=","tf",".","Summary","(","value","=","[","tf",".","Summary",".","Value","(","tag","=","\"mean eval loss\"",",","simple_value","=","float","(","mean_eval_ppx",")",")","]",")","dev_writer",".","add_summary","(","eval_loss_summary",",","current_step",")","eval_KL_loss_summary","=","tf",".","Summary","(","value","=","[","tf",".","Summary",".","Value","(","tag","=","\"mean eval loss\"",",","simple_value","=","float","(","mean_eval_KL_loss",")",")","]",")","dev_writer",".","add_summary","(","eval_KL_loss_summary",",","current_step",")"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/vrae.py#L154-L283"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"_extract_argmax_and_embed","parameters":"(embedding, output_projection=None,\n update_embedding=True)","argument_list":"","return_statement":"return loop_function","docstring":"Get a loop_function that extracts the previous symbol and embeds it.\n\n Args:\n embedding: embedding tensor for symbols.\n output_projection: None or a pair (W, B). If provided, each fed previous\n output will first be multiplied by W and added B.\n update_embedding: Boolean; if False, the gradients will not propagate\n through the embeddings.\n\n Returns:\n A loop function.","docstring_summary":"Get a loop_function that extracts the previous symbol and embeds it.","docstring_tokens":["Get","a","loop_function","that","extracts","the","previous","symbol","and","embeds","it","."],"function":"def _extract_argmax_and_embed(embedding, output_projection=None,\n update_embedding=True):\n \"\"\"Get a loop_function that extracts the previous symbol and embeds it.\n\n Args:\n embedding: embedding tensor for symbols.\n output_projection: None or a pair (W, B). If provided, each fed previous\n output will first be multiplied by W and added B.\n update_embedding: Boolean; if False, the gradients will not propagate\n through the embeddings.\n\n Returns:\n A loop function.\n \"\"\"\n def loop_function(prev, _):\n if output_projection is not None:\n prev = nn_ops.xw_plus_b(\n prev, output_projection[0], output_projection[1])\n prev_symbol = math_ops.argmax(prev, 1)\n # Note that gradients will not propagate through the second parameter of\n # embedding_lookup.\n emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)\n if not update_embedding:\n emb_prev = array_ops.stop_gradient(emb_prev)\n return emb_prev\n return loop_function","function_tokens":["def","_extract_argmax_and_embed","(","embedding",",","output_projection","=","None",",","update_embedding","=","True",")",":","def","loop_function","(","prev",",","_",")",":","if","output_projection","is","not","None",":","prev","=","nn_ops",".","xw_plus_b","(","prev",",","output_projection","[","0","]",",","output_projection","[","1","]",")","prev_symbol","=","math_ops",".","argmax","(","prev",",","1",")","# Note that gradients will not propagate through the second parameter of","# embedding_lookup.","emb_prev","=","embedding_ops",".","embedding_lookup","(","embedding",",","prev_symbol",")","if","not","update_embedding",":","emb_prev","=","array_ops",".","stop_gradient","(","emb_prev",")","return","emb_prev","return","loop_function"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L95-L120"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"rnn_decoder","parameters":"(decoder_inputs, initial_state, cell, word_dropout_keep_prob=1, replace_inp=None,\n loop_function=None, scope=None)","argument_list":"","return_statement":"return outputs, state","docstring":"RNN decoder for the sequence-to-sequence model.\n\n Args:\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n initial_state: 2D Tensor with shape [batch_size x cell.state_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n loop_function: If not None, this function will be applied to the i-th output\n in order to generate the i+1-st input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol). This can be used for decoding,\n but also for training to emulate http:\/\/arxiv.org\/abs\/1506.03099.\n Signature -- loop_function(prev, i) = next\n * prev is a 2D Tensor of shape [batch_size x output_size],\n * i is an integer, the step number (when advanced control is needed),\n * next is a 2D Tensor of shape [batch_size x input_size].\n scope: VariableScope for the created subgraph; defaults to \"rnn_decoder\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing generated outputs.\n state: The state of each cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n (Note that in some cases, like basic RNN cell or GRU cell, outputs and\n states can be the same. They are different for LSTM cells though.)","docstring_summary":"RNN decoder for the sequence-to-sequence model.","docstring_tokens":["RNN","decoder","for","the","sequence","-","to","-","sequence","model","."],"function":"def rnn_decoder(decoder_inputs, initial_state, cell, word_dropout_keep_prob=1, replace_inp=None,\n loop_function=None, scope=None):\n \"\"\"RNN decoder for the sequence-to-sequence model.\n\n Args:\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n initial_state: 2D Tensor with shape [batch_size x cell.state_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n loop_function: If not None, this function will be applied to the i-th output\n in order to generate the i+1-st input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol). This can be used for decoding,\n but also for training to emulate http:\/\/arxiv.org\/abs\/1506.03099.\n Signature -- loop_function(prev, i) = next\n * prev is a 2D Tensor of shape [batch_size x output_size],\n * i is an integer, the step number (when advanced control is needed),\n * next is a 2D Tensor of shape [batch_size x input_size].\n scope: VariableScope for the created subgraph; defaults to \"rnn_decoder\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing generated outputs.\n state: The state of each cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n (Note that in some cases, like basic RNN cell or GRU cell, outputs and\n states can be the same. 
They are different for LSTM cells though.)\n \"\"\"\n with variable_scope.variable_scope(scope or \"rnn_decoder\"):\n state = initial_state\n outputs = []\n prev = None\n seq_len = len(decoder_inputs)\n keep = tf.select(tf.random_uniform([seq_len]) < word_dropout_keep_prob,\n tf.fill([seq_len], True), tf.fill([seq_len], False))\n for i, inp in enumerate(decoder_inputs):\n if loop_function is not None and prev is not None:\n with variable_scope.variable_scope(\"loop_function\", reuse=True):\n if word_dropout_keep_prob < 1:\n inp = tf.cond(keep[i], lambda: loop_function(prev, i), lambda: replace_inp)\n else:\n inp = loop_function(prev, i)\n if i > 0:\n variable_scope.get_variable_scope().reuse_variables()\n output, state = cell(inp, state)\n outputs.append(output)\n if loop_function is not None:\n prev = output\n return outputs, state","function_tokens":["def","rnn_decoder","(","decoder_inputs",",","initial_state",",","cell",",","word_dropout_keep_prob","=","1",",","replace_inp","=","None",",","loop_function","=","None",",","scope","=","None",")",":","with","variable_scope",".","variable_scope","(","scope","or","\"rnn_decoder\"",")",":","state","=","initial_state","outputs","=","[","]","prev","=","None","seq_len","=","len","(","decoder_inputs",")","keep","=","tf",".","select","(","tf",".","random_uniform","(","[","seq_len","]",")","<","word_dropout_keep_prob",",","tf",".","fill","(","[","seq_len","]",",","True",")",",","tf",".","fill","(","[","seq_len","]",",","False",")",")","for","i",",","inp","in","enumerate","(","decoder_inputs",")",":","if","loop_function","is","not","None","and","prev","is","not","None",":","with","variable_scope",".","variable_scope","(","\"loop_function\"",",","reuse","=","True",")",":","if","word_dropout_keep_prob","<","1",":","inp","=","tf",".","cond","(","keep","[","i","]",",","lambda",":","loop_function","(","prev",",","i",")",",","lambda",":","replace_inp",")","else",":","inp","=","loop_function","(","prev",",","i",")","if","i",">","0",":","variable_scope",".","get_variable_scope","(",")",".","reuse_variables","(",")","output",",","state","=","cell","(","inp",",","state",")","outputs",".","append","(","output",")","if","loop_function","is","not","None",":","prev","=","output","return","outputs",",","state"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L123-L170"}
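The rnn_decoder record above feeds the model's own prediction back in only when a per-step Bernoulli draw falls under word_dropout_keep_prob; otherwise it feeds a fixed replacement input. A small Python sketch of that decision (the token strings are placeholders, not the repository's embeddings):

import numpy as np

def feed_back(prediction, replacement, keep_prob, rng=np.random):
    # Keep the model's own prediction with probability keep_prob,
    # otherwise substitute the replacement (e.g. an UNK embedding).
    return prediction if rng.random_sample() < keep_prob else replacement

tokens = [feed_back("predicted", "<UNK>", keep_prob=0.75) for _ in range(8)]
print(tokens)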
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"beam_rnn_decoder","parameters":"(decoder_inputs, initial_state, cell, loop_function=None,\n scope=None,output_projection=None, beam_size=1)","argument_list":"","return_statement":"return outputs, state, tf.reshape(tf.concat(0, beam_path),[-1,beam_size]), tf.reshape(tf.concat(0, beam_symbols),[-1,beam_size])","docstring":"RNN decoder for the sequence-to-sequence model.\n\n Args:\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n initial_state: 2D Tensor with shape [batch_size x cell.state_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n loop_function: If not None, this function will be applied to the i-th output\n in order to generate the i+1-st input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol). This can be used for decoding,\n but also for training to emulate http:\/\/arxiv.org\/abs\/1506.03099.\n Signature -- loop_function(prev, i) = next\n * prev is a 2D Tensor of shape [batch_size x output_size],\n * i is an integer, the step number (when advanced control is needed),\n * next is a 2D Tensor of shape [batch_size x input_size].\n scope: VariableScope for the created subgraph; defaults to \"rnn_decoder\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing generated outputs.\n state: The state of each cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n (Note that in some cases, like basic RNN cell or GRU cell, outputs and\n states can be the same. They are different for LSTM cells though.)","docstring_summary":"RNN decoder for the sequence-to-sequence model.","docstring_tokens":["RNN","decoder","for","the","sequence","-","to","-","sequence","model","."],"function":"def beam_rnn_decoder(decoder_inputs, initial_state, cell, loop_function=None,\n scope=None,output_projection=None, beam_size=1):\n \"\"\"RNN decoder for the sequence-to-sequence model.\n\n Args:\n decoder_inputs: A list of 2D Tensors [batch_size x input_size].\n initial_state: 2D Tensor with shape [batch_size x cell.state_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n loop_function: If not None, this function will be applied to the i-th output\n in order to generate the i+1-st input, and decoder_inputs will be ignored,\n except for the first element (\"GO\" symbol). This can be used for decoding,\n but also for training to emulate http:\/\/arxiv.org\/abs\/1506.03099.\n Signature -- loop_function(prev, i) = next\n * prev is a 2D Tensor of shape [batch_size x output_size],\n * i is an integer, the step number (when advanced control is needed),\n * next is a 2D Tensor of shape [batch_size x input_size].\n scope: VariableScope for the created subgraph; defaults to \"rnn_decoder\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x output_size] containing generated outputs.\n state: The state of each cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n (Note that in some cases, like basic RNN cell or GRU cell, outputs and\n states can be the same. 
They are different for LSTM cells though.)\n \"\"\"\n with variable_scope.variable_scope(scope or \"rnn_decoder\"):\n state = initial_state\n outputs = []\n prev = None\n log_beam_probs, beam_path, beam_symbols = [],[],[]\n state_size = int(initial_state.get_shape().with_rank(2)[1])\n\n for i, inp in enumerate(decoder_inputs):\n if loop_function is not None and prev is not None:\n with variable_scope.variable_scope(\"loop_function\", reuse=True):\n inp = loop_function(prev, i,log_beam_probs, beam_path, beam_symbols)\n if i > 0:\n variable_scope.get_variable_scope().reuse_variables()\n\n input_size = inp.get_shape().with_rank(2)[1]\n x = inp\n output, state = cell(x, state)\n\n if loop_function is not None:\n prev = output\n if i ==0:\n states =[]\n for kk in range(beam_size):\n states.append(state)\n state = tf.reshape(tf.concat(0, states), [-1, state_size])\n\n outputs.append(tf.argmax(nn_ops.xw_plus_b(\n output, output_projection[0], output_projection[1]), dimension=1))\n return outputs, state, tf.reshape(tf.concat(0, beam_path),[-1,beam_size]), tf.reshape(tf.concat(0, beam_symbols),[-1,beam_size])","function_tokens":["def","beam_rnn_decoder","(","decoder_inputs",",","initial_state",",","cell",",","loop_function","=","None",",","scope","=","None",",","output_projection","=","None",",","beam_size","=","1",")",":","with","variable_scope",".","variable_scope","(","scope","or","\"rnn_decoder\"",")",":","state","=","initial_state","outputs","=","[","]","prev","=","None","log_beam_probs",",","beam_path",",","beam_symbols","=","[","]",",","[","]",",","[","]","state_size","=","int","(","initial_state",".","get_shape","(",")",".","with_rank","(","2",")","[","1","]",")","for","i",",","inp","in","enumerate","(","decoder_inputs",")",":","if","loop_function","is","not","None","and","prev","is","not","None",":","with","variable_scope",".","variable_scope","(","\"loop_function\"",",","reuse","=","True",")",":","inp","=","loop_function","(","prev",",","i",",","log_beam_probs",",","beam_path",",","beam_symbols",")","if","i",">","0",":","variable_scope",".","get_variable_scope","(",")",".","reuse_variables","(",")","input_size","=","inp",".","get_shape","(",")",".","with_rank","(","2",")","[","1","]","x","=","inp","output",",","state","=","cell","(","x",",","state",")","if","loop_function","is","not","None",":","prev","=","output","if","i","==","0",":","states","=","[","]","for","kk","in","range","(","beam_size",")",":","states",".","append","(","state",")","state","=","tf",".","reshape","(","tf",".","concat","(","0",",","states",")",",","[","-","1",",","state_size","]",")","outputs",".","append","(","tf",".","argmax","(","nn_ops",".","xw_plus_b","(","output",",","output_projection","[","0","]",",","output_projection","[","1","]",")",",","dimension","=","1",")",")","return","outputs",",","state",",","tf",".","reshape","(","tf",".","concat","(","0",",","beam_path",")",",","[","-","1",",","beam_size","]",")",",","tf",".","reshape","(","tf",".","concat","(","0",",","beam_symbols",")",",","[","-","1",",","beam_size","]",")"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L173-L228"}
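beam_rnn_decoder above tracks per-step beam log-probabilities, chosen symbols, and back-pointers into the previous beam. The toy pure-Python beam search below shows the same expand-score-prune bookkeeping over history-independent per-step log-probabilities; it is only an illustration of the idea, not the repository's decoder.

import numpy as np

def beam_search(logprobs_per_step, beam_size=2):
    # logprobs_per_step: [T, vocab] log-probabilities, assumed independent
    # of the prefix purely to keep the example short.
    beams = [([], 0.0)]
    for step_logprobs in logprobs_per_step:
        candidates = [(seq + [v], score + lp)
                      for seq, score in beams
                      for v, lp in enumerate(step_logprobs)]
        candidates.sort(key=lambda c: c[1], reverse=True)
        beams = candidates[:beam_size]   # prune to the beam width
    return beams

np.random.seed(0)
logprobs = np.log(np.random.dirichlet(np.ones(5), size=3))
print(beam_search(logprobs))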
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"embedding_rnn_decoder","parameters":"(decoder_inputs,\n initial_state,\n cell,\n embedding,\n num_symbols,\n embedding_size,\n word_dropout_keep_prob=1,\n replace_input=None,\n output_projection=None,\n feed_previous=False,\n update_embedding_for_previous=True,\n weight_initializer=None,\n beam_size=1,\n scope=None)","argument_list":"","return_statement":"","docstring":"RNN decoder with embedding and a pure-decoding option.\n\n Args:\n decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).\n initial_state: 2D Tensor [batch_size x cell.state_size].\n cell: rnn_cell.RNNCell defining the cell function.\n num_symbols: Integer, how many symbols come into the embedding.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_symbols] and B has\n shape [num_symbols]; if provided and feed_previous=True, each fed\n previous output will first be multiplied by W and added B.\n feed_previous: Boolean; if True, only the first of decoder_inputs will be\n used (the \"GO\" symbol), and all other decoder inputs will be generated by:\n next = embedding_lookup(embedding, argmax(previous_output)),\n In effect, this implements a greedy decoder. It can also be used\n during training to emulate http:\/\/arxiv.org\/abs\/1506.03099.\n If False, decoder_inputs are used as given (the standard decoder case).\n update_embedding_for_previous: Boolean; if False and feed_previous=True,\n only the embedding for the first symbol of decoder_inputs (the \"GO\"\n symbol) will be updated by back propagation. Embeddings for the symbols\n generated from the decoder itself remain unchanged. This parameter has\n no effect if feed_previous=False.\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_rnn_decoder\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors. The\n output is of shape [batch_size x cell.output_size] when\n output_projection is not None (and represents the dense representation\n of predicted tokens). It is of shape [batch_size x num_decoder_symbols]\n when output_projection is None.\n state: The state of each decoder cell in each time-step. 
This is a list\n with length len(decoder_inputs) -- one item for each time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n\n Raises:\n ValueError: When output_projection has the wrong shape.","docstring_summary":"RNN decoder with embedding and a pure-decoding option.","docstring_tokens":["RNN","decoder","with","embedding","and","a","pure","-","decoding","option","."],"function":"def embedding_rnn_decoder(decoder_inputs,\n initial_state,\n cell,\n embedding,\n num_symbols,\n embedding_size,\n word_dropout_keep_prob=1,\n replace_input=None,\n output_projection=None,\n feed_previous=False,\n update_embedding_for_previous=True,\n weight_initializer=None,\n beam_size=1,\n scope=None):\n \"\"\"RNN decoder with embedding and a pure-decoding option.\n\n Args:\n decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).\n initial_state: 2D Tensor [batch_size x cell.state_size].\n cell: rnn_cell.RNNCell defining the cell function.\n num_symbols: Integer, how many symbols come into the embedding.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_symbols] and B has\n shape [num_symbols]; if provided and feed_previous=True, each fed\n previous output will first be multiplied by W and added B.\n feed_previous: Boolean; if True, only the first of decoder_inputs will be\n used (the \"GO\" symbol), and all other decoder inputs will be generated by:\n next = embedding_lookup(embedding, argmax(previous_output)),\n In effect, this implements a greedy decoder. It can also be used\n during training to emulate http:\/\/arxiv.org\/abs\/1506.03099.\n If False, decoder_inputs are used as given (the standard decoder case).\n update_embedding_for_previous: Boolean; if False and feed_previous=True,\n only the embedding for the first symbol of decoder_inputs (the \"GO\"\n symbol) will be updated by back propagation. Embeddings for the symbols\n generated from the decoder itself remain unchanged. This parameter has\n no effect if feed_previous=False.\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_rnn_decoder\".\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors. The\n output is of shape [batch_size x cell.output_size] when\n output_projection is not None (and represents the dense representation\n of predicted tokens). It is of shape [batch_size x num_decoder_symbols]\n when output_projection is None.\n state: The state of each decoder cell in each time-step. 
This is a list\n with length len(decoder_inputs) -- one item for each time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n\n Raises:\n ValueError: When output_projection has the wrong shape.\n \"\"\"\n with variable_scope.variable_scope(scope or \"embedding_rnn_decoder\") as scope:\n if output_projection is not None:\n dtype = scope.dtype\n proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)\n proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])\n proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)\n proj_biases.get_shape().assert_is_compatible_with([num_symbols])\n\n if not embedding:\n embedding = variable_scope.get_variable(\"embedding\", [num_symbols, embedding_size],\n initializer=weight_initializer())\n\n if beam_size > 1:\n loop_function = _extract_beam_search(\n embedding, beam_size,num_symbols,embedding_size, output_projection,\n update_embedding_for_previous)\n else:\n loop_function = _extract_argmax_and_embed(\n embedding, output_projection,\n update_embedding_for_previous) if feed_previous else None\n\n emb_inp = [\n embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs]\n if beam_size > 1:\n return beam_rnn_decoder(emb_inp, initial_state, cell,loop_function=loop_function,\n output_projection=output_projection, beam_size=beam_size)\n\n return rnn_decoder(emb_inp, initial_state, cell, word_dropout_keep_prob, replace_input,\n loop_function=loop_function)","function_tokens":["def","embedding_rnn_decoder","(","decoder_inputs",",","initial_state",",","cell",",","embedding",",","num_symbols",",","embedding_size",",","word_dropout_keep_prob","=","1",",","replace_input","=","None",",","output_projection","=","None",",","feed_previous","=","False",",","update_embedding_for_previous","=","True",",","weight_initializer","=","None",",","beam_size","=","1",",","scope","=","None",")",":","with","variable_scope",".","variable_scope","(","scope","or","\"embedding_rnn_decoder\"",")","as","scope",":","if","output_projection","is","not","None",":","dtype","=","scope",".","dtype","proj_weights","=","ops",".","convert_to_tensor","(","output_projection","[","0","]",",","dtype","=","dtype",")","proj_weights",".","get_shape","(",")",".","assert_is_compatible_with","(","[","None",",","num_symbols","]",")","proj_biases","=","ops",".","convert_to_tensor","(","output_projection","[","1","]",",","dtype","=","dtype",")","proj_biases",".","get_shape","(",")",".","assert_is_compatible_with","(","[","num_symbols","]",")","if","not","embedding",":","embedding","=","variable_scope",".","get_variable","(","\"embedding\"",",","[","num_symbols",",","embedding_size","]",",","initializer","=","weight_initializer","(",")",")","if","beam_size",">","1",":","loop_function","=","_extract_beam_search","(","embedding",",","beam_size",",","num_symbols",",","embedding_size",",","output_projection",",","update_embedding_for_previous",")","else",":","loop_function","=","_extract_argmax_and_embed","(","embedding",",","output_projection",",","update_embedding_for_previous",")","if","feed_previous","else","None","emb_inp","=","[","embedding_ops",".","embedding_lookup","(","embedding",",","i",")","for","i","in","decoder_inputs","]","if","beam_size",">","1",":","return","beam_rnn_decoder","(","emb_inp",",","initial_state",",","cell",",","loop_function","=","loop_function",",","output_projection","=","output_projection",",","beam_size","=","beam_size",")","return","rnn_decoder","(","emb_inp",",","initial_state",",","cell",",","word_dropout_ke
ep_prob",",","replace_input",",","loop_function","=","loop_function",")"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L231-L313"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"embedding_attention_encoder","parameters":"(encoder_inputs,\n cell,\n num_encoder_symbols,\n embedding_size,\n dtype=None,\n scope=None)","argument_list":"","return_statement":"","docstring":"Embedding sequence-to-sequence model with attention.\n\n This model first embeds encoder_inputs by a newly created embedding (of shape\n [num_encoder_symbols x input_size]). Then it runs an RNN to encode\n embedded encoder_inputs into a state vector. It keeps the outputs of this\n RNN at every step to use for attention later. Next, it embeds decoder_inputs\n by another newly created embedding (of shape [num_decoder_symbols x\n input_size]). Then it runs attention decoder, initialized with the last\n encoder state, on embedded decoder_inputs and attending to encoder outputs.\n\n Warning: when output_projection is None, the size of the attention vectors\n and variables will be made proportional to num_decoder_symbols, can be large.\n\n Args:\n encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n num_encoder_symbols: Integer; number of symbols on the encoder side.\n num_decoder_symbols: Integer; number of symbols on the decoder side.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n num_heads: Number of attention heads that read from attention_states.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_decoder_symbols] and B has\n shape [num_decoder_symbols]; if provided and feed_previous=True, each\n fed previous output will first be multiplied by W and added B.\n feed_previous: Boolean or scalar Boolean Tensor; if True, only the first\n of decoder_inputs will be used (the \"GO\" symbol), and all other decoder\n inputs will be taken from previous outputs (as in embedding_rnn_decoder).\n If False, decoder_inputs are used as given (the standard decoder case).\n dtype: The dtype of the initial RNN state (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_attention_seq2seq\".\n initial_state_attention: If False (default), initial attentions are zero.\n If True, initialize the attentions from the initial state and attention\n states.\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x num_decoder_symbols] containing the generated\n outputs.\n state: The state of each decoder cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].","docstring_summary":"Embedding sequence-to-sequence model with attention.","docstring_tokens":["Embedding","sequence","-","to","-","sequence","model","with","attention","."],"function":"def embedding_attention_encoder(encoder_inputs,\n cell,\n num_encoder_symbols,\n embedding_size,\n dtype=None,\n scope=None):\n \"\"\"Embedding sequence-to-sequence model with attention.\n\n This model first embeds encoder_inputs by a newly created embedding (of shape\n [num_encoder_symbols x input_size]). Then it runs an RNN to encode\n embedded encoder_inputs into a state vector. It keeps the outputs of this\n RNN at every step to use for attention later. 
Next, it embeds decoder_inputs\n by another newly created embedding (of shape [num_decoder_symbols x\n input_size]). Then it runs attention decoder, initialized with the last\n encoder state, on embedded decoder_inputs and attending to encoder outputs.\n\n Warning: when output_projection is None, the size of the attention vectors\n and variables will be made proportional to num_decoder_symbols, can be large.\n\n Args:\n encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].\n cell: rnn_cell.RNNCell defining the cell function and size.\n num_encoder_symbols: Integer; number of symbols on the encoder side.\n num_decoder_symbols: Integer; number of symbols on the decoder side.\n embedding_size: Integer, the length of the embedding vector for each symbol.\n num_heads: Number of attention heads that read from attention_states.\n output_projection: None or a pair (W, B) of output projection weights and\n biases; W has shape [output_size x num_decoder_symbols] and B has\n shape [num_decoder_symbols]; if provided and feed_previous=True, each\n fed previous output will first be multiplied by W and added B.\n feed_previous: Boolean or scalar Boolean Tensor; if True, only the first\n of decoder_inputs will be used (the \"GO\" symbol), and all other decoder\n inputs will be taken from previous outputs (as in embedding_rnn_decoder).\n If False, decoder_inputs are used as given (the standard decoder case).\n dtype: The dtype of the initial RNN state (default: tf.float32).\n scope: VariableScope for the created subgraph; defaults to\n \"embedding_attention_seq2seq\".\n initial_state_attention: If False (default), initial attentions are zero.\n If True, initialize the attentions from the initial state and attention\n states.\n\n Returns:\n A tuple of the form (outputs, state), where:\n outputs: A list of the same length as decoder_inputs of 2D Tensors with\n shape [batch_size x num_decoder_symbols] containing the generated\n outputs.\n state: The state of each decoder cell at the final time-step.\n It is a 2D Tensor of shape [batch_size x cell.state_size].\n \"\"\"\n with variable_scope.variable_scope(\n scope or \"embedding_attention_encoder\", dtype=dtype) as scope:\n dtype = scope.dtype\n # Encoder.\n encoder_cell = rnn_cell.EmbeddingWrapper(\n cell, embedding_classes=num_encoder_symbols,\n embedding_size=embedding_size)\n encoder_outputs, encoder_state = rnn.rnn(\n encoder_cell, encoder_inputs, dtype=dtype)\n\n # First calculate a concatenation of encoder outputs to put attention on.\n top_states = [array_ops.reshape(e, [-1, 1, cell.output_size])\n for e in encoder_outputs]\n attention_states = array_ops.concat(1, top_states)\n\n return encoder_state, attention_states","function_tokens":["def","embedding_attention_encoder","(","encoder_inputs",",","cell",",","num_encoder_symbols",",","embedding_size",",","dtype","=","None",",","scope","=","None",")",":","with","variable_scope",".","variable_scope","(","scope","or","\"embedding_attention_encoder\"",",","dtype","=","dtype",")","as","scope",":","dtype","=","scope",".","dtype","# Encoder.","encoder_cell","=","rnn_cell",".","EmbeddingWrapper","(","cell",",","embedding_classes","=","num_encoder_symbols",",","embedding_size","=","embedding_size",")","encoder_outputs",",","encoder_state","=","rnn",".","rnn","(","encoder_cell",",","encoder_inputs",",","dtype","=","dtype",")","# First calculate a concatenation of encoder outputs to put attention 
on.","top_states","=","[","array_ops",".","reshape","(","e",",","[","-","1",",","1",",","cell",".","output_size","]",")","for","e","in","encoder_outputs","]","attention_states","=","array_ops",".","concat","(","1",",","top_states",")","return","encoder_state",",","attention_states"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L316-L381"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"sequence_loss_by_example","parameters":"(logits, targets, weights,\n average_across_timesteps=True,\n softmax_loss_function=None, name=None)","argument_list":"","return_statement":"return log_perps","docstring":"Weighted cross-entropy loss for a sequence of logits (per example).\n\n Args:\n logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].\n targets: List of 1D batch-sized int32 Tensors of the same length as logits.\n weights: List of 1D batch-sized float-Tensors of the same length as logits.\n average_across_timesteps: If set, divide the returned cost by the total\n label weight.\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n name: Optional name for this operation, default: \"sequence_loss_by_example\".\n\n Returns:\n 1D batch-sized float Tensor: The log-perplexity for each sequence.\n\n Raises:\n ValueError: If len(logits) is different from len(targets) or len(weights).","docstring_summary":"Weighted cross-entropy loss for a sequence of logits (per example).","docstring_tokens":["Weighted","cross","-","entropy","loss","for","a","sequence","of","logits","(","per","example",")","."],"function":"def sequence_loss_by_example(logits, targets, weights,\n average_across_timesteps=True,\n softmax_loss_function=None, name=None):\n \"\"\"Weighted cross-entropy loss for a sequence of logits (per example).\n\n Args:\n logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].\n targets: List of 1D batch-sized int32 Tensors of the same length as logits.\n weights: List of 1D batch-sized float-Tensors of the same length as logits.\n average_across_timesteps: If set, divide the returned cost by the total\n label weight.\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n name: Optional name for this operation, default: \"sequence_loss_by_example\".\n\n Returns:\n 1D batch-sized float Tensor: The log-perplexity for each sequence.\n\n Raises:\n ValueError: If len(logits) is different from len(targets) or len(weights).\n \"\"\"\n if len(targets) != len(logits) or len(weights) != len(logits):\n raise ValueError(\"Lengths of logits, weights, and targets must be the same \"\n \"%d, %d, %d.\" % (len(logits), len(weights), len(targets)))\n with ops.name_scope(name, \"sequence_loss_by_example\",\n logits + targets + weights):\n log_perp_list = []\n for logit, target, weight in zip(logits, targets, weights):\n if softmax_loss_function is None:\n # TODO(irving,ebrevdo): This reshape is needed because\n # sequence_loss_by_example is called with scalars sometimes, which\n # violates our general scalar strictness policy.\n target = array_ops.reshape(target, [-1])\n crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(\n logit, target)\n else:\n crossent = softmax_loss_function(logit, target)\n log_perp_list.append(crossent * weight)\n log_perps = math_ops.add_n(log_perp_list)\n if average_across_timesteps:\n total_size = math_ops.add_n(weights)\n total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.\n log_perps \/= total_size\n return 
log_perps","function_tokens":["def","sequence_loss_by_example","(","logits",",","targets",",","weights",",","average_across_timesteps","=","True",",","softmax_loss_function","=","None",",","name","=","None",")",":","if","len","(","targets",")","!=","len","(","logits",")","or","len","(","weights",")","!=","len","(","logits",")",":","raise","ValueError","(","\"Lengths of logits, weights, and targets must be the same \"","\"%d, %d, %d.\"","%","(","len","(","logits",")",",","len","(","weights",")",",","len","(","targets",")",")",")","with","ops",".","name_scope","(","name",",","\"sequence_loss_by_example\"",",","logits","+","targets","+","weights",")",":","log_perp_list","=","[","]","for","logit",",","target",",","weight","in","zip","(","logits",",","targets",",","weights",")",":","if","softmax_loss_function","is","None",":","# TODO(irving,ebrevdo): This reshape is needed because","# sequence_loss_by_example is called with scalars sometimes, which","# violates our general scalar strictness policy.","target","=","array_ops",".","reshape","(","target",",","[","-","1","]",")","crossent","=","nn_ops",".","sparse_softmax_cross_entropy_with_logits","(","logit",",","target",")","else",":","crossent","=","softmax_loss_function","(","logit",",","target",")","log_perp_list",".","append","(","crossent","*","weight",")","log_perps","=","math_ops",".","add_n","(","log_perp_list",")","if","average_across_timesteps",":","total_size","=","math_ops",".","add_n","(","weights",")","total_size","+=","1e-12","# Just to avoid division by 0 for all-0 weights.","log_perps","\/=","total_size","return","log_perps"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L413-L456"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"sequence_loss","parameters":"(logits, targets, weights,\n average_across_timesteps=True, average_across_batch=True,\n softmax_loss_function=None, name=None)","argument_list":"","return_statement":"","docstring":"Weighted cross-entropy loss for a sequence of logits, batch-collapsed.\n\n Args:\n logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].\n targets: List of 1D batch-sized int32 Tensors of the same length as logits.\n weights: List of 1D batch-sized float-Tensors of the same length as logits.\n average_across_timesteps: If set, divide the returned cost by the total\n label weight.\n average_across_batch: If set, divide the returned cost by the batch size.\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n name: Optional name for this operation, defaults to \"sequence_loss\".\n\n Returns:\n A scalar float Tensor: The average log-perplexity per symbol (weighted).\n\n Raises:\n ValueError: If len(logits) is different from len(targets) or len(weights).","docstring_summary":"Weighted cross-entropy loss for a sequence of logits, batch-collapsed.","docstring_tokens":["Weighted","cross","-","entropy","loss","for","a","sequence","of","logits","batch","-","collapsed","."],"function":"def sequence_loss(logits, targets, weights,\n average_across_timesteps=True, average_across_batch=True,\n softmax_loss_function=None, name=None):\n \"\"\"Weighted cross-entropy loss for a sequence of logits, batch-collapsed.\n\n Args:\n logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].\n targets: List of 1D batch-sized int32 Tensors of the same length as logits.\n weights: List of 1D batch-sized float-Tensors of the same length as logits.\n average_across_timesteps: If set, divide the returned cost by the total\n label weight.\n average_across_batch: If set, divide the returned cost by the batch size.\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n name: Optional name for this operation, defaults to \"sequence_loss\".\n\n Returns:\n A scalar float Tensor: The average log-perplexity per symbol (weighted).\n\n Raises:\n ValueError: If len(logits) is different from len(targets) or len(weights).\n \"\"\"\n with ops.name_scope(name, \"sequence_loss\", logits + targets + weights):\n cost = math_ops.reduce_sum(sequence_loss_by_example(\n logits, targets, weights,\n average_across_timesteps=average_across_timesteps,\n softmax_loss_function=softmax_loss_function))\n if average_across_batch:\n batch_size = array_ops.shape(targets[0])[0]\n return cost \/ math_ops.cast(batch_size, cost.dtype)\n else:\n return 
cost","function_tokens":["def","sequence_loss","(","logits",",","targets",",","weights",",","average_across_timesteps","=","True",",","average_across_batch","=","True",",","softmax_loss_function","=","None",",","name","=","None",")",":","with","ops",".","name_scope","(","name",",","\"sequence_loss\"",",","logits","+","targets","+","weights",")",":","cost","=","math_ops",".","reduce_sum","(","sequence_loss_by_example","(","logits",",","targets",",","weights",",","average_across_timesteps","=","average_across_timesteps",",","softmax_loss_function","=","softmax_loss_function",")",")","if","average_across_batch",":","batch_size","=","array_ops",".","shape","(","targets","[","0","]",")","[","0","]","return","cost","\/","math_ops",".","cast","(","batch_size",",","cost",".","dtype",")","else",":","return","cost"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L459-L490"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"model_with_buckets","parameters":"(encoder_inputs, decoder_inputs, targets, weights,\n buckets, seq2seq, softmax_loss_function=None,\n per_example_loss=False, name=None)","argument_list":"","return_statement":"return outputs, losses","docstring":"Create a sequence-to-sequence model with support for bucketing.\n\n The seq2seq argument is a function that defines a sequence-to-sequence model,\n e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))\n\n Args:\n encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.\n decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.\n targets: A list of 1D batch-sized int32 Tensors (desired output sequence).\n weights: List of 1D batch-sized float-Tensors to weight the targets.\n buckets: A list of pairs of (input size, output size) for each bucket.\n seq2seq: A sequence-to-sequence model function; it takes 2 input that\n agree with encoder_inputs and decoder_inputs, and returns a pair\n consisting of outputs and states (as, e.g., basic_rnn_seq2seq).\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n per_example_loss: Boolean. If set, the returned loss will be a batch-sized\n tensor of losses for each sequence in the batch. If unset, it will be\n a scalar with the averaged loss from all examples.\n name: Optional name for this operation, defaults to \"model_with_buckets\".\n\n Returns:\n A tuple of the form (outputs, losses), where:\n outputs: The outputs for each bucket. Its j'th element consists of a list\n of 2D Tensors. 
The shape of output tensors can be either\n [batch_size x output_size] or [batch_size x num_decoder_symbols]\n depending on the seq2seq model used.\n losses: List of scalar Tensors, representing losses for each bucket, or,\n if per_example_loss is set, a list of 1D batch-sized float Tensors.\n\n Raises:\n ValueError: If length of encoder_inputsut, targets, or weights is smaller\n than the largest (last) bucket.","docstring_summary":"Create a sequence-to-sequence model with support for bucketing.","docstring_tokens":["Create","a","sequence","-","to","-","sequence","model","with","support","for","bucketing","."],"function":"def model_with_buckets(encoder_inputs, decoder_inputs, targets, weights,\n buckets, seq2seq, softmax_loss_function=None,\n per_example_loss=False, name=None):\n \"\"\"Create a sequence-to-sequence model with support for bucketing.\n\n The seq2seq argument is a function that defines a sequence-to-sequence model,\n e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))\n\n Args:\n encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.\n decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.\n targets: A list of 1D batch-sized int32 Tensors (desired output sequence).\n weights: List of 1D batch-sized float-Tensors to weight the targets.\n buckets: A list of pairs of (input size, output size) for each bucket.\n seq2seq: A sequence-to-sequence model function; it takes 2 input that\n agree with encoder_inputs and decoder_inputs, and returns a pair\n consisting of outputs and states (as, e.g., basic_rnn_seq2seq).\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n per_example_loss: Boolean. If set, the returned loss will be a batch-sized\n tensor of losses for each sequence in the batch. If unset, it will be\n a scalar with the averaged loss from all examples.\n name: Optional name for this operation, defaults to \"model_with_buckets\".\n\n Returns:\n A tuple of the form (outputs, losses), where:\n outputs: The outputs for each bucket. Its j'th element consists of a list\n of 2D Tensors. 
The shape of output tensors can be either\n [batch_size x output_size] or [batch_size x num_decoder_symbols]\n depending on the seq2seq model used.\n losses: List of scalar Tensors, representing losses for each bucket, or,\n if per_example_loss is set, a list of 1D batch-sized float Tensors.\n\n Raises:\n ValueError: If length of encoder_inputsut, targets, or weights is smaller\n than the largest (last) bucket.\n \"\"\"\n if len(encoder_inputs) < buckets[-1][0]:\n raise ValueError(\"Length of encoder_inputs (%d) must be at least that of la\"\n \"st bucket (%d).\" % (len(encoder_inputs), buckets[-1][0]))\n if len(targets) < buckets[-1][1]:\n raise ValueError(\"Length of targets (%d) must be at least that of last\"\n \"bucket (%d).\" % (len(targets), buckets[-1][1]))\n if len(weights) < buckets[-1][1]:\n raise ValueError(\"Length of weights (%d) must be at least that of last\"\n \"bucket (%d).\" % (len(weights), buckets[-1][1]))\n\n all_inputs = encoder_inputs + decoder_inputs + targets + weights\n losses = []\n outputs = []\n with ops.name_scope(name, \"model_with_buckets\", all_inputs):\n for j, bucket in enumerate(buckets):\n with variable_scope.variable_scope(variable_scope.get_variable_scope(),\n reuse=True if j > 0 else None):\n bucket_outputs, _ = seq2seq(encoder_inputs[:bucket[0]],\n decoder_inputs[:bucket[1]])\n outputs.append(bucket_outputs)\n if per_example_loss:\n losses.append(sequence_loss_by_example(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n else:\n losses.append(sequence_loss(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n\n return outputs, losses","function_tokens":["def","model_with_buckets","(","encoder_inputs",",","decoder_inputs",",","targets",",","weights",",","buckets",",","seq2seq",",","softmax_loss_function","=","None",",","per_example_loss","=","False",",","name","=","None",")",":","if","len","(","encoder_inputs",")","<","buckets","[","-","1","]","[","0","]",":","raise","ValueError","(","\"Length of encoder_inputs (%d) must be at least that of la\"","\"st bucket (%d).\"","%","(","len","(","encoder_inputs",")",",","buckets","[","-","1","]","[","0","]",")",")","if","len","(","targets",")","<","buckets","[","-","1","]","[","1","]",":","raise","ValueError","(","\"Length of targets (%d) must be at least that of last\"","\"bucket (%d).\"","%","(","len","(","targets",")",",","buckets","[","-","1","]","[","1","]",")",")","if","len","(","weights",")","<","buckets","[","-","1","]","[","1","]",":","raise","ValueError","(","\"Length of weights (%d) must be at least that of last\"","\"bucket 
(%d).\"","%","(","len","(","weights",")",",","buckets","[","-","1","]","[","1","]",")",")","all_inputs","=","encoder_inputs","+","decoder_inputs","+","targets","+","weights","losses","=","[","]","outputs","=","[","]","with","ops",".","name_scope","(","name",",","\"model_with_buckets\"",",","all_inputs",")",":","for","j",",","bucket","in","enumerate","(","buckets",")",":","with","variable_scope",".","variable_scope","(","variable_scope",".","get_variable_scope","(",")",",","reuse","=","True","if","j",">","0","else","None",")",":","bucket_outputs",",","_","=","seq2seq","(","encoder_inputs","[",":","bucket","[","0","]","]",",","decoder_inputs","[",":","bucket","[","1","]","]",")","outputs",".","append","(","bucket_outputs",")","if","per_example_loss",":","losses",".","append","(","sequence_loss_by_example","(","outputs","[","-","1","]",",","targets","[",":","bucket","[","1","]","]",",","weights","[",":","bucket","[","1","]","]",",","softmax_loss_function","=","softmax_loss_function",")",")","else",":","losses",".","append","(","sequence_loss","(","outputs","[","-","1","]",",","targets","[",":","bucket","[","1","]","]",",","weights","[",":","bucket","[","1","]","]",",","softmax_loss_function","=","softmax_loss_function",")",")","return","outputs",",","losses"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L493-L559"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"autoencoder_with_buckets","parameters":"(encoder_inputs, decoder_inputs, targets, weights,\n buckets, encoder, decoder, softmax_loss_function=None,\n per_example_loss=False, name=None)","argument_list":"","return_statement":"return outputs, losses","docstring":"Create a sequence-to-sequence model with support for bucketing.\n\n The seq2seq argument is a function that defines a sequence-to-sequence model,\n e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))\n\n Args:\n encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.\n decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.\n targets: A list of 1D batch-sized int32 Tensors (desired output sequence).\n weights: List of 1D batch-sized float-Tensors to weight the targets.\n buckets: A list of pairs of (input size, output size) for each bucket.\n seq2seq: A sequence-to-sequence model function; it takes 2 input that\n agree with encoder_inputs and decoder_inputs, and returns a pair\n consisting of outputs and states (as, e.g., basic_rnn_seq2seq).\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n per_example_loss: Boolean. If set, the returned loss will be a batch-sized\n tensor of losses for each sequence in the batch. If unset, it will be\n a scalar with the averaged loss from all examples.\n name: Optional name for this operation, defaults to \"model_with_buckets\".\n\n Returns:\n A tuple of the form (outputs, losses), where:\n outputs: The outputs for each bucket. Its j'th element consists of a list\n of 2D Tensors. 
The shape of output tensors can be either\n [batch_size x output_size] or [batch_size x num_decoder_symbols]\n depending on the seq2seq model used.\n losses: List of scalar Tensors, representing losses for each bucket, or,\n if per_example_loss is set, a list of 1D batch-sized float Tensors.\n\n Raises:\n ValueError: If length of encoder_inputsut, targets, or weights is smaller\n than the largest (last) bucket.","docstring_summary":"Create a sequence-to-sequence model with support for bucketing.","docstring_tokens":["Create","a","sequence","-","to","-","sequence","model","with","support","for","bucketing","."],"function":"def autoencoder_with_buckets(encoder_inputs, decoder_inputs, targets, weights,\n buckets, encoder, decoder, softmax_loss_function=None,\n per_example_loss=False, name=None):\n \"\"\"Create a sequence-to-sequence model with support for bucketing.\n\n The seq2seq argument is a function that defines a sequence-to-sequence model,\n e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))\n\n Args:\n encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.\n decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.\n targets: A list of 1D batch-sized int32 Tensors (desired output sequence).\n weights: List of 1D batch-sized float-Tensors to weight the targets.\n buckets: A list of pairs of (input size, output size) for each bucket.\n seq2seq: A sequence-to-sequence model function; it takes 2 input that\n agree with encoder_inputs and decoder_inputs, and returns a pair\n consisting of outputs and states (as, e.g., basic_rnn_seq2seq).\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n per_example_loss: Boolean. If set, the returned loss will be a batch-sized\n tensor of losses for each sequence in the batch. If unset, it will be\n a scalar with the averaged loss from all examples.\n name: Optional name for this operation, defaults to \"model_with_buckets\".\n\n Returns:\n A tuple of the form (outputs, losses), where:\n outputs: The outputs for each bucket. Its j'th element consists of a list\n of 2D Tensors. 
The shape of output tensors can be either\n [batch_size x output_size] or [batch_size x num_decoder_symbols]\n depending on the seq2seq model used.\n losses: List of scalar Tensors, representing losses for each bucket, or,\n if per_example_loss is set, a list of 1D batch-sized float Tensors.\n\n Raises:\n ValueError: If length of encoder_inputsut, targets, or weights is smaller\n than the largest (last) bucket.\n \"\"\"\n if len(encoder_inputs) < buckets[-1][0]:\n raise ValueError(\"Length of encoder_inputs (%d) must be at least that of la\"\n \"st bucket (%d).\" % (len(encoder_inputs), buckets[-1][0]))\n if len(targets) < buckets[-1][1]:\n raise ValueError(\"Length of targets (%d) must be at least that of last\"\n \"bucket (%d).\" % (len(targets), buckets[-1][1]))\n if len(weights) < buckets[-1][1]:\n raise ValueError(\"Length of weights (%d) must be at least that of last\"\n \"bucket (%d).\" % (len(weights), buckets[-1][1]))\n\n all_inputs = encoder_inputs + decoder_inputs + targets + weights\n losses = []\n outputs = []\n with ops.name_scope(name, \"model_with_buckets\", all_inputs):\n for j, bucket in enumerate(buckets):\n with variable_scope.variable_scope(variable_scope.get_variable_scope(),\n reuse=True if j > 0 else None):\n encoder_state = encoder(encoder_inputs[:bucket[0]])\n bucket_outputs, _ = decoder(encoder_state, decoder_inputs[:bucket[1]])\n outputs.append(bucket_outputs)\n if per_example_loss:\n losses.append(sequence_loss_by_example(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n else:\n losses.append(sequence_loss(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n\n return outputs, losses","function_tokens":["def","autoencoder_with_buckets","(","encoder_inputs",",","decoder_inputs",",","targets",",","weights",",","buckets",",","encoder",",","decoder",",","softmax_loss_function","=","None",",","per_example_loss","=","False",",","name","=","None",")",":","if","len","(","encoder_inputs",")","<","buckets","[","-","1","]","[","0","]",":","raise","ValueError","(","\"Length of encoder_inputs (%d) must be at least that of la\"","\"st bucket (%d).\"","%","(","len","(","encoder_inputs",")",",","buckets","[","-","1","]","[","0","]",")",")","if","len","(","targets",")","<","buckets","[","-","1","]","[","1","]",":","raise","ValueError","(","\"Length of targets (%d) must be at least that of last\"","\"bucket (%d).\"","%","(","len","(","targets",")",",","buckets","[","-","1","]","[","1","]",")",")","if","len","(","weights",")","<","buckets","[","-","1","]","[","1","]",":","raise","ValueError","(","\"Length of weights (%d) must be at least that of last\"","\"bucket 
(%d).\"","%","(","len","(","weights",")",",","buckets","[","-","1","]","[","1","]",")",")","all_inputs","=","encoder_inputs","+","decoder_inputs","+","targets","+","weights","losses","=","[","]","outputs","=","[","]","with","ops",".","name_scope","(","name",",","\"model_with_buckets\"",",","all_inputs",")",":","for","j",",","bucket","in","enumerate","(","buckets",")",":","with","variable_scope",".","variable_scope","(","variable_scope",".","get_variable_scope","(",")",",","reuse","=","True","if","j",">","0","else","None",")",":","encoder_state","=","encoder","(","encoder_inputs","[",":","bucket","[","0","]","]",")","bucket_outputs",",","_","=","decoder","(","encoder_state",",","decoder_inputs","[",":","bucket","[","1","]","]",")","outputs",".","append","(","bucket_outputs",")","if","per_example_loss",":","losses",".","append","(","sequence_loss_by_example","(","outputs","[","-","1","]",",","targets","[",":","bucket","[","1","]","]",",","weights","[",":","bucket","[","1","]","]",",","softmax_loss_function","=","softmax_loss_function",")",")","else",":","losses",".","append","(","sequence_loss","(","outputs","[","-","1","]",",","targets","[",":","bucket","[","1","]","]",",","weights","[",":","bucket","[","1","]","]",",","softmax_loss_function","=","softmax_loss_function",")",")","return","outputs",",","losses"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L562-L628"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"sample","parameters":"(means,\n logvars,\n latent_dim,\n iaf=True,\n kl_min=None,\n anneal=False,\n kl_rate=None,\n dtype=None)","argument_list":"","return_statement":"return latent_vector, kl_obj, kl_cost","docstring":"Perform sampling and calculate KL divergence.\n\n Args:\n means: tensor of shape (batch_size, latent_dim)\n logvars: tensor of shape (batch_size, latent_dim)\n latent_dim: dimension of latent space.\n iaf: perform linear IAF or not.\n kl_min: lower bound for KL divergence.\n anneal: perform KL cost annealing or not.\n kl_rate: KL divergence is multiplied by kl_rate if anneal is set to True.\n Returns:\n latent_vector: latent variable after sampling. A vector of shape (batch_size, latent_dim).\n kl_obj: objective to be minimized for the KL term.\n kl_cost: real KL divergence.","docstring_summary":"Perform sampling and calculate KL divergence.","docstring_tokens":["Perform","sampling","and","calculate","KL","divergence","."],"function":"def sample(means,\n logvars,\n latent_dim,\n iaf=True,\n kl_min=None,\n anneal=False,\n kl_rate=None,\n dtype=None):\n \"\"\"Perform sampling and calculate KL divergence.\n\n Args:\n means: tensor of shape (batch_size, latent_dim)\n logvars: tensor of shape (batch_size, latent_dim)\n latent_dim: dimension of latent space.\n iaf: perform linear IAF or not.\n kl_min: lower bound for KL divergence.\n anneal: perform KL cost annealing or not.\n kl_rate: KL divergence is multiplied by kl_rate if anneal is set to True.\n Returns:\n latent_vector: latent variable after sampling. A vector of shape (batch_size, latent_dim).\n kl_obj: objective to be minimized for the KL term.\n kl_cost: real KL divergence.\n \"\"\"\n if iaf:\n with tf.variable_scope('iaf'):\n prior = DiagonalGaussian(tf.zeros_like(means, dtype=dtype),\n tf.zeros_like(logvars, dtype=dtype))\n posterior = DiagonalGaussian(means, logvars)\n z = posterior.sample\n\n logqs = posterior.logps(z)\n L = tf.get_variable(\"inverse_cholesky\", [latent_dim, latent_dim], dtype=dtype, initializer=tf.zeros_initializer)\n diag_one = tf.ones([latent_dim], dtype=dtype)\n L = tf.matrix_set_diag(L, diag_one)\n mask = np.tril(np.ones([latent_dim,latent_dim]))\n L = L * mask\n latent_vector = tf.matmul(z, L)\n logps = prior.logps(latent_vector)\n kl_cost = logqs - logps\n else:\n noise = tf.random_normal(tf.shape(mean))\n sample = mean + tf.exp(0.5 * logvar) * noise\n kl_cost = -0.5 * (logvars - tf.square(means) -\n tf.exp(logvars) + 1.0)\n kl_ave = tf.reduce_mean(kl_cost, [0]) #mean of kl_cost over batches\n kl_obj = kl_cost = tf.reduce_sum(kl_ave)\n if kl_min:\n kl_obj = tf.reduce_sum(tf.maximum(kl_ave, kl_min))\n if anneal:\n kl_obj = kl_obj * kl_rate\n\n return latent_vector, kl_obj, 
kl_cost","function_tokens":["def","sample","(","means",",","logvars",",","latent_dim",",","iaf","=","True",",","kl_min","=","None",",","anneal","=","False",",","kl_rate","=","None",",","dtype","=","None",")",":","if","iaf",":","with","tf",".","variable_scope","(","'iaf'",")",":","prior","=","DiagonalGaussian","(","tf",".","zeros_like","(","means",",","dtype","=","dtype",")",",","tf",".","zeros_like","(","logvars",",","dtype","=","dtype",")",")","posterior","=","DiagonalGaussian","(","means",",","logvars",")","z","=","posterior",".","sample","logqs","=","posterior",".","logps","(","z",")","L","=","tf",".","get_variable","(","\"inverse_cholesky\"",",","[","latent_dim",",","latent_dim","]",",","dtype","=","dtype",",","initializer","=","tf",".","zeros_initializer",")","diag_one","=","tf",".","ones","(","[","latent_dim","]",",","dtype","=","dtype",")","L","=","tf",".","matrix_set_diag","(","L",",","diag_one",")","mask","=","np",".","tril","(","np",".","ones","(","[","latent_dim",",","latent_dim","]",")",")","L","=","L","*","mask","latent_vector","=","tf",".","matmul","(","z",",","L",")","logps","=","prior",".","logps","(","latent_vector",")","kl_cost","=","logqs","-","logps","else",":","noise","=","tf",".","random_normal","(","tf",".","shape","(","mean",")",")","sample","=","mean","+","tf",".","exp","(","0.5","*","logvar",")","*","noise","kl_cost","=","-","0.5","*","(","logvars","-","tf",".","square","(","means",")","-","tf",".","exp","(","logvars",")","+","1.0",")","kl_ave","=","tf",".","reduce_mean","(","kl_cost",",","[","0","]",")","#mean of kl_cost over batches","kl_obj","=","kl_cost","=","tf",".","reduce_sum","(","kl_ave",")","if","kl_min",":","kl_obj","=","tf",".","reduce_sum","(","tf",".","maximum","(","kl_ave",",","kl_min",")",")","if","anneal",":","kl_obj","=","kl_obj","*","kl_rate","return","latent_vector",",","kl_obj",",","kl_cost"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L631-L682"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"variational_autoencoder_with_buckets","parameters":"(encoder_inputs, decoder_inputs, targets, weights,\n buckets, encoder, decoder, enc_latent, latent_dec, sample, kl_f,\n probabilistic=False,\n softmax_loss_function=None,\n per_example_loss=False, name=None)","argument_list":"","return_statement":"return outputs, losses, KL_divergences","docstring":"Create a sequence-to-sequence model with support for bucketing.\n\n The seq2seq argument is a function that defines a sequence-to-sequence model,\n e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))\n\n Args:\n encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.\n decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.\n targets: A list of 1D batch-sized int32 Tensors (desired output sequence).\n weights: List of 1D batch-sized float-Tensors to weight the targets.\n buckets: A list of pairs of (input size, output size) for each bucket.\n seq2seq: A sequence-to-sequence model function; it takes 2 input that\n agree with encoder_inputs and decoder_inputs, and returns a pair\n consisting of outputs and states (as, e.g., basic_rnn_seq2seq).\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n per_example_loss: Boolean. If set, the returned loss will be a batch-sized\n tensor of losses for each sequence in the batch. If unset, it will be\n a scalar with the averaged loss from all examples.\n name: Optional name for this operation, defaults to \"model_with_buckets\".\n\n Returns:\n A tuple of the form (outputs, losses), where:\n outputs: The outputs for each bucket. Its j'th element consists of a list\n of 2D Tensors. 
The shape of output tensors can be either\n [batch_size x output_size] or [batch_size x num_decoder_symbols]\n depending on the seq2seq model used.\n losses: List of scalar Tensors, representing losses for each bucket, or,\n if per_example_loss is set, a list of 1D batch-sized float Tensors.\n\n Raises:\n ValueError: If length of encoder_inputsut, targets, or weights is smaller\n than the largest (last) bucket.","docstring_summary":"Create a sequence-to-sequence model with support for bucketing.","docstring_tokens":["Create","a","sequence","-","to","-","sequence","model","with","support","for","bucketing","."],"function":"def variational_autoencoder_with_buckets(encoder_inputs, decoder_inputs, targets, weights,\n buckets, encoder, decoder, enc_latent, latent_dec, sample, kl_f,\n probabilistic=False,\n softmax_loss_function=None,\n per_example_loss=False, name=None):\n \"\"\"Create a sequence-to-sequence model with support for bucketing.\n\n The seq2seq argument is a function that defines a sequence-to-sequence model,\n e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))\n\n Args:\n encoder_inputs: A list of Tensors to feed the encoder; first seq2seq input.\n decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.\n targets: A list of 1D batch-sized int32 Tensors (desired output sequence).\n weights: List of 1D batch-sized float-Tensors to weight the targets.\n buckets: A list of pairs of (input size, output size) for each bucket.\n seq2seq: A sequence-to-sequence model function; it takes 2 input that\n agree with encoder_inputs and decoder_inputs, and returns a pair\n consisting of outputs and states (as, e.g., basic_rnn_seq2seq).\n softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch\n to be used instead of the standard softmax (the default if this is None).\n per_example_loss: Boolean. If set, the returned loss will be a batch-sized\n tensor of losses for each sequence in the batch. If unset, it will be\n a scalar with the averaged loss from all examples.\n name: Optional name for this operation, defaults to \"model_with_buckets\".\n\n Returns:\n A tuple of the form (outputs, losses), where:\n outputs: The outputs for each bucket. Its j'th element consists of a list\n of 2D Tensors. 
The shape of output tensors can be either\n [batch_size x output_size] or [batch_size x num_decoder_symbols]\n depending on the seq2seq model used.\n losses: List of scalar Tensors, representing losses for each bucket, or,\n if per_example_loss is set, a list of 1D batch-sized float Tensors.\n\n Raises:\n ValueError: If length of encoder_inputsut, targets, or weights is smaller\n than the largest (last) bucket.\n \"\"\"\n if len(encoder_inputs) < buckets[-1][0]:\n raise ValueError(\"Length of encoder_inputs (%d) must be at least that of la\"\n \"st bucket (%d).\" % (len(encoder_inputs), buckets[-1][0]))\n if len(targets) < buckets[-1][1]:\n raise ValueError(\"Length of targets (%d) must be at least that of last\"\n \"bucket (%d).\" % (len(targets), buckets[-1][1]))\n if len(weights) < buckets[-1][1]:\n raise ValueError(\"Length of weights (%d) must be at least that of last\"\n \"bucket (%d).\" % (len(weights), buckets[-1][1]))\n\n all_inputs = encoder_inputs + decoder_inputs + targets + weights\n losses = []\n outputs = []\n KL_divergences = []\n with ops.name_scope(name, \"variational_autoencoder_with_buckets\", all_inputs):\n for j, bucket in enumerate(buckets):\n with variable_scope.variable_scope(variable_scope.get_variable_scope(),\n reuse=True if j > 0 else None):\n encoder_last_state = encoder(encoder_inputs[:bucket[0]])\n mean, logvar = enc_latent(encoder_last_state)\n if probabilistic:\n latent_vector = sample(mean, logvar)\n else:\n latent_vector = mean\n decoder_initial_state = latent_dec(latent_vector)\n bucket_outputs, _ = decoder(decoder_initial_state, decoder_inputs[:bucket[1]])\n outputs.append(bucket_outputs)\n total_size = math_ops.add_n(weights[:bucket[1]])\n total_size += 1e-12\n KL_divergences.append(tf.reduce_mean(kl_f(mean, logvar) \/ total_size))\n if per_example_loss:\n losses.append(sequence_loss_by_example(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n else:\n losses.append(sequence_loss(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n\n return outputs, losses, KL_divergences","function_tokens":["def","variational_autoencoder_with_buckets","(","encoder_inputs",",","decoder_inputs",",","targets",",","weights",",","buckets",",","encoder",",","decoder",",","enc_latent",",","latent_dec",",","sample",",","kl_f",",","probabilistic","=","False",",","softmax_loss_function","=","None",",","per_example_loss","=","False",",","name","=","None",")",":","if","len","(","encoder_inputs",")","<","buckets","[","-","1","]","[","0","]",":","raise","ValueError","(","\"Length of encoder_inputs (%d) must be at least that of la\"","\"st bucket (%d).\"","%","(","len","(","encoder_inputs",")",",","buckets","[","-","1","]","[","0","]",")",")","if","len","(","targets",")","<","buckets","[","-","1","]","[","1","]",":","raise","ValueError","(","\"Length of targets (%d) must be at least that of last\"","\"bucket (%d).\"","%","(","len","(","targets",")",",","buckets","[","-","1","]","[","1","]",")",")","if","len","(","weights",")","<","buckets","[","-","1","]","[","1","]",":","raise","ValueError","(","\"Length of weights (%d) must be at least that of last\"","\"bucket 
(%d).\"","%","(","len","(","weights",")",",","buckets","[","-","1","]","[","1","]",")",")","all_inputs","=","encoder_inputs","+","decoder_inputs","+","targets","+","weights","losses","=","[","]","outputs","=","[","]","KL_divergences","=","[","]","with","ops",".","name_scope","(","name",",","\"variational_autoencoder_with_buckets\"",",","all_inputs",")",":","for","j",",","bucket","in","enumerate","(","buckets",")",":","with","variable_scope",".","variable_scope","(","variable_scope",".","get_variable_scope","(",")",",","reuse","=","True","if","j",">","0","else","None",")",":","encoder_last_state","=","encoder","(","encoder_inputs","[",":","bucket","[","0","]","]",")","mean",",","logvar","=","enc_latent","(","encoder_last_state",")","if","probabilistic",":","latent_vector","=","sample","(","mean",",","logvar",")","else",":","latent_vector","=","mean","decoder_initial_state","=","latent_dec","(","latent_vector",")","bucket_outputs",",","_","=","decoder","(","decoder_initial_state",",","decoder_inputs","[",":","bucket","[","1","]","]",")","outputs",".","append","(","bucket_outputs",")","total_size","=","math_ops",".","add_n","(","weights","[",":","bucket","[","1","]","]",")","total_size","+=","1e-12","KL_divergences",".","append","(","tf",".","reduce_mean","(","kl_f","(","mean",",","logvar",")","\/","total_size",")",")","if","per_example_loss",":","losses",".","append","(","sequence_loss_by_example","(","outputs","[","-","1","]",",","targets","[",":","bucket","[","1","]","]",",","weights","[",":","bucket","[","1","]","]",",","softmax_loss_function","=","softmax_loss_function",")",")","else",":","losses",".","append","(","sequence_loss","(","outputs","[","-","1","]",",","targets","[",":","bucket","[","1","]","]",",","weights","[",":","bucket","[","1","]","]",",","softmax_loss_function","=","softmax_loss_function",")",")","return","outputs",",","losses",",","KL_divergences"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L740-L818"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"variational_encoder_with_buckets","parameters":"(encoder_inputs, buckets, encoder,\n enc_latent, softmax_loss_function=None,\n per_example_loss=False, name=None)","argument_list":"","return_statement":"return means, logvars","docstring":"Create a sequence-to-sequence model with support for bucketing.","docstring_summary":"Create a sequence-to-sequence model with support for bucketing.","docstring_tokens":["Create","a","sequence","-","to","-","sequence","model","with","support","for","bucketing","."],"function":"def variational_encoder_with_buckets(encoder_inputs, buckets, encoder,\n enc_latent, softmax_loss_function=None,\n per_example_loss=False, name=None):\n \"\"\"Create a sequence-to-sequence model with support for bucketing.\n \"\"\"\n if len(encoder_inputs) < buckets[-1][0]:\n raise ValueError(\"Length of encoder_inputs (%d) must be at least that of la\"\n \"st bucket (%d).\" % (len(encoder_inputs), buckets[-1][0]))\n\n all_inputs = encoder_inputs\n means = []\n logvars = []\n with ops.name_scope(name, \"variational_encoder_with_buckets\", all_inputs):\n for j, bucket in enumerate(buckets):\n with variable_scope.variable_scope(variable_scope.get_variable_scope(),\n reuse=True if j > 0 else None):\n encoder_last_state = encoder(encoder_inputs[:bucket[0]])\n mean, logvar = enc_latent(encoder_last_state)\n means.append(mean)\n logvars.append(logvar)\n\n return means, logvars","function_tokens":["def","variational_encoder_with_buckets","(","encoder_inputs",",","buckets",",","encoder",",","enc_latent",",","softmax_loss_function","=","None",",","per_example_loss","=","False",",","name","=","None",")",":","if","len","(","encoder_inputs",")","<","buckets","[","-","1","]","[","0","]",":","raise","ValueError","(","\"Length of encoder_inputs (%d) must be at least that of la\"","\"st bucket (%d).\"","%","(","len","(","encoder_inputs",")",",","buckets","[","-","1","]","[","0","]",")",")","all_inputs","=","encoder_inputs","means","=","[","]","logvars","=","[","]","with","ops",".","name_scope","(","name",",","\"variational_encoder_with_buckets\"",",","all_inputs",")",":","for","j",",","bucket","in","enumerate","(","buckets",")",":","with","variable_scope",".","variable_scope","(","variable_scope",".","get_variable_scope","(",")",",","reuse","=","True","if","j",">","0","else","None",")",":","encoder_last_state","=","encoder","(","encoder_inputs","[",":","bucket","[","0","]","]",")","mean",",","logvar","=","enc_latent","(","encoder_last_state",")","means",".","append","(","mean",")","logvars",".","append","(","logvar",")","return","means",",","logvars"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L821-L842"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"variational_decoder_with_buckets","parameters":"(means, logvars, decoder_inputs,\n targets, weights,\n buckets, decoder, latent_dec, sample,\n softmax_loss_function=None,\n per_example_loss=False, name=None)","argument_list":"","return_statement":"return outputs, losses, KL_objs, KL_costs","docstring":"Create a sequence-to-sequence model with support for bucketing.","docstring_summary":"Create a sequence-to-sequence model with support for bucketing.","docstring_tokens":["Create","a","sequence","-","to","-","sequence","model","with","support","for","bucketing","."],"function":"def variational_decoder_with_buckets(means, logvars, decoder_inputs,\n targets, weights,\n buckets, decoder, latent_dec, sample,\n softmax_loss_function=None,\n per_example_loss=False, name=None):\n \"\"\"Create a sequence-to-sequence model with support for bucketing.\n \"\"\"\n if len(targets) < buckets[-1][1]:\n raise ValueError(\"Length of targets (%d) must be at least that of last\"\n \"bucket (%d).\" % (len(targets), buckets[-1][1]))\n if len(weights) < buckets[-1][1]:\n raise ValueError(\"Length of weights (%d) must be at least that of last\"\n \"bucket (%d).\" % (len(weights), buckets[-1][1]))\n\n all_inputs = decoder_inputs + targets + weights\n losses = []\n outputs = []\n KL_objs = []\n KL_costs = []\n with ops.name_scope(name, \"variational_decoder_with_buckets\", all_inputs):\n for j, bucket in enumerate(buckets):\n with variable_scope.variable_scope(variable_scope.get_variable_scope(),\n reuse=True if j > 0 else None):\n\n latent_vector, kl_obj, kl_cost = sample(means[j], logvars[j])\n decoder_initial_state = latent_dec(latent_vector)\n\n bucket_outputs, _ = decoder(decoder_initial_state, decoder_inputs[:bucket[1]])\n outputs.append(bucket_outputs)\n total_size = math_ops.add_n(weights[:bucket[1]])\n total_size += 1e-12\n KL_objs.append(tf.reduce_mean(kl_obj \/ total_size))\n KL_costs.append(tf.reduce_mean(kl_cost \/ total_size))\n if per_example_loss:\n losses.append(sequence_loss_by_example(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n else:\n losses.append(sequence_loss(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n\n return outputs, losses, KL_objs, KL_costs","function_tokens":["def","variational_decoder_with_buckets","(","means",",","logvars",",","decoder_inputs",",","targets",",","weights",",","buckets",",","decoder",",","latent_dec",",","sample",",","softmax_loss_function","=","None",",","per_example_loss","=","False",",","name","=","None",")",":","if","len","(","targets",")","<","buckets","[","-","1","]","[","1","]",":","raise","ValueError","(","\"Length of targets (%d) must be at least that of last\"","\"bucket (%d).\"","%","(","len","(","targets",")",",","buckets","[","-","1","]","[","1","]",")",")","if","len","(","weights",")","<","buckets","[","-","1","]","[","1","]",":","raise","ValueError","(","\"Length of weights (%d) must be at least that of last\"","\"bucket 
(%d).\"","%","(","len","(","weights",")",",","buckets","[","-","1","]","[","1","]",")",")","all_inputs","=","decoder_inputs","+","targets","+","weights","losses","=","[","]","outputs","=","[","]","KL_objs","=","[","]","KL_costs","=","[","]","with","ops",".","name_scope","(","name",",","\"variational_decoder_with_buckets\"",",","all_inputs",")",":","for","j",",","bucket","in","enumerate","(","buckets",")",":","with","variable_scope",".","variable_scope","(","variable_scope",".","get_variable_scope","(",")",",","reuse","=","True","if","j",">","0","else","None",")",":","latent_vector",",","kl_obj",",","kl_cost","=","sample","(","means","[","j","]",",","logvars","[","j","]",")","decoder_initial_state","=","latent_dec","(","latent_vector",")","bucket_outputs",",","_","=","decoder","(","decoder_initial_state",",","decoder_inputs","[",":","bucket","[","1","]","]",")","outputs",".","append","(","bucket_outputs",")","total_size","=","math_ops",".","add_n","(","weights","[",":","bucket","[","1","]","]",")","total_size","+=","1e-12","KL_objs",".","append","(","tf",".","reduce_mean","(","kl_obj","\/","total_size",")",")","KL_costs",".","append","(","tf",".","reduce_mean","(","kl_cost","\/","total_size",")",")","if","per_example_loss",":","losses",".","append","(","sequence_loss_by_example","(","outputs","[","-","1","]",",","targets","[",":","bucket","[","1","]","]",",","weights","[",":","bucket","[","1","]","]",",","softmax_loss_function","=","softmax_loss_function",")",")","else",":","losses",".","append","(","sequence_loss","(","outputs","[","-","1","]",",","targets","[",":","bucket","[","1","]","]",",","weights","[",":","bucket","[","1","]","]",",","softmax_loss_function","=","softmax_loss_function",")",")","return","outputs",",","losses",",","KL_objs",",","KL_costs"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L845-L887"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"seq2seq.py","language":"python","identifier":"variational_beam_decoder_with_buckets","parameters":"(means, logvars, decoder_inputs,\n targets, weights,\n buckets, decoder, latent_dec, kl_f, sample, iaf=False,\n softmax_loss_function=None,\n per_example_loss=False, name=None)","argument_list":"","return_statement":"return outputs, losses, KL_objs, KL_costs","docstring":"Create a sequence-to-sequence model with support for bucketing.","docstring_summary":"Create a sequence-to-sequence model with support for bucketing.","docstring_tokens":["Create","a","sequence","-","to","-","sequence","model","with","support","for","bucketing","."],"function":"def variational_beam_decoder_with_buckets(means, logvars, decoder_inputs,\n targets, weights,\n buckets, decoder, latent_dec, kl_f, sample, iaf=False,\n softmax_loss_function=None,\n per_example_loss=False, name=None):\n \"\"\"Create a sequence-to-sequence model with support for bucketing.\n \"\"\"\n if len(targets) < buckets[-1][1]:\n raise ValueError(\"Length of targets (%d) must be at least that of last\"\n \"bucket (%d).\" % (len(targets), buckets[-1][1]))\n if len(weights) < buckets[-1][1]:\n raise ValueError(\"Length of weights (%d) must be at least that of last\"\n \"bucket (%d).\" % (len(weights), buckets[-1][1]))\n\n all_inputs = decoder_inputs + targets + weights\n losses = []\n outputs = []\n beam_paths = []\n beam_path = []\n KL_divergences = []\n with ops.name_scope(name, \"variational_decoder_with_buckets\", all_inputs):\n for j, bucket in enumerate(buckets):\n with variable_scope.variable_scope(variable_scope.get_variable_scope(),\n reuse=True if j > 0 else None):\n latent_vector, kl_cost = sample(means[j], logvars[j])\n decoder_initial_state = latent_dec(latent_vector)\n\n bucket_outputs, _, beam_path, beam_symbol = decoder(decoder_initial_state, decoder_inputs[:bucket[1]])\n outputs.append(bucket_outputs)\n beam_paths.append(beam_path)\n beam_symbols.append(beam_symbol)\n total_size = math_ops.add_n(weights[:bucket[1]])\n total_size += 1e-12\n KL_divergences.append(tf.reduce_mean(kl_cost \/ total_size))\n if per_example_loss:\n losses.append(sequence_loss_by_example(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n else:\n losses.append(sequence_loss(\n outputs[-1], targets[:bucket[1]], weights[:bucket[1]],\n softmax_loss_function=softmax_loss_function))\n\n return outputs, losses, KL_objs, KL_costs","function_tokens":["def","variational_beam_decoder_with_buckets","(","means",",","logvars",",","decoder_inputs",",","targets",",","weights",",","buckets",",","decoder",",","latent_dec",",","kl_f",",","sample",",","iaf","=","False",",","softmax_loss_function","=","None",",","per_example_loss","=","False",",","name","=","None",")",":","if","len","(","targets",")","<","buckets","[","-","1","]","[","1","]",":","raise","ValueError","(","\"Length of targets (%d) must be at least that of last\"","\"bucket (%d).\"","%","(","len","(","targets",")",",","buckets","[","-","1","]","[","1","]",")",")","if","len","(","weights",")","<","buckets","[","-","1","]","[","1","]",":","raise","ValueError","(","\"Length of weights (%d) must be at least that of last\"","\"bucket 
(%d).\"","%","(","len","(","weights",")",",","buckets","[","-","1","]","[","1","]",")",")","all_inputs","=","decoder_inputs","+","targets","+","weights","losses","=","[","]","outputs","=","[","]","beam_paths","=","[","]","beam_path","=","[","]","KL_divergences","=","[","]","with","ops",".","name_scope","(","name",",","\"variational_decoder_with_buckets\"",",","all_inputs",")",":","for","j",",","bucket","in","enumerate","(","buckets",")",":","with","variable_scope",".","variable_scope","(","variable_scope",".","get_variable_scope","(",")",",","reuse","=","True","if","j",">","0","else","None",")",":","latent_vector",",","kl_cost","=","sample","(","means","[","j","]",",","logvars","[","j","]",")","decoder_initial_state","=","latent_dec","(","latent_vector",")","bucket_outputs",",","_",",","beam_path",",","beam_symbol","=","decoder","(","decoder_initial_state",",","decoder_inputs","[",":","bucket","[","1","]","]",")","outputs",".","append","(","bucket_outputs",")","beam_paths",".","append","(","beam_path",")","beam_symbols",".","append","(","beam_symbol",")","total_size","=","math_ops",".","add_n","(","weights","[",":","bucket","[","1","]","]",")","total_size","+=","1e-12","KL_divergences",".","append","(","tf",".","reduce_mean","(","kl_cost","\/","total_size",")",")","if","per_example_loss",":","losses",".","append","(","sequence_loss_by_example","(","outputs","[","-","1","]",",","targets","[",":","bucket","[","1","]","]",",","weights","[",":","bucket","[","1","]","]",",","softmax_loss_function","=","softmax_loss_function",")",")","else",":","losses",".","append","(","sequence_loss","(","outputs","[","-","1","]",",","targets","[",":","bucket","[","1","]","]",",","weights","[",":","bucket","[","1","]","]",",","softmax_loss_function","=","softmax_loss_function",")",")","return","outputs",",","losses",",","KL_objs",",","KL_costs"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/seq2seq.py#L890-L933"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"utils\/data_utils.py","language":"python","identifier":"maybe_download","parameters":"(directory, filename, url)","argument_list":"","return_statement":"return filepath","docstring":"Download filename from url unless it's already in directory.","docstring_summary":"Download filename from url unless it's already in directory.","docstring_tokens":["Download","filename","from","url","unless","it","s","already","in","directory","."],"function":"def maybe_download(directory, filename, url):\n \"\"\"Download filename from url unless it's already in directory.\"\"\"\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Succesfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath","function_tokens":["def","maybe_download","(","directory",",","filename",",","url",")",":","if","not","os",".","path",".","exists","(","directory",")",":","print","(","\"Creating directory %s\"","%","directory",")","os",".","mkdir","(","directory",")","filepath","=","os",".","path",".","join","(","directory",",","filename",")","if","not","os",".","path",".","exists","(","filepath",")",":","print","(","\"Downloading %s to %s\"","%","(","url",",","filepath",")",")","filepath",",","_","=","urllib",".","request",".","urlretrieve","(","url",",","filepath",")","statinfo","=","os",".","stat","(","filepath",")","print","(","\"Succesfully downloaded\"",",","filename",",","statinfo",".","st_size",",","\"bytes\"",")","return","filepath"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/utils\/data_utils.py#L52-L63"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"utils\/data_utils.py","language":"python","identifier":"gunzip_file","parameters":"(gz_path, new_path)","argument_list":"","return_statement":"","docstring":"Unzips from gz_path into new_path.","docstring_summary":"Unzips from gz_path into new_path.","docstring_tokens":["Unzips","from","gz_path","into","new_path","."],"function":"def gunzip_file(gz_path, new_path):\n \"\"\"Unzips from gz_path into new_path.\"\"\"\n print(\"Unpacking %s to %s\" % (gz_path, new_path))\n with gzip.open(gz_path, \"r\") as gz_file:\n with open(new_path, \"w\") as new_file:\n for line in gz_file:\n new_file.write(line)","function_tokens":["def","gunzip_file","(","gz_path",",","new_path",")",":","print","(","\"Unpacking %s to %s\"","%","(","gz_path",",","new_path",")",")","with","gzip",".","open","(","gz_path",",","\"r\"",")","as","gz_file",":","with","open","(","new_path",",","\"w\"",")","as","new_file",":","for","line","in","gz_file",":","new_file",".","write","(","line",")"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/utils\/data_utils.py#L66-L72"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"utils\/data_utils.py","language":"python","identifier":"basic_tokenizer","parameters":"(sentence)","argument_list":"","return_statement":"return [w for w in words if w]","docstring":"Very basic tokenizer: split the sentence into a list of tokens.","docstring_summary":"Very basic tokenizer: split the sentence into a list of tokens.","docstring_tokens":["Very","basic","tokenizer",":","split","the","sentence","into","a","list","of","tokens","."],"function":"def basic_tokenizer(sentence):\n \"\"\"Very basic tokenizer: split the sentence into a list of tokens.\"\"\"\n words = []\n for space_separated_fragment in sentence.strip().split():\n words.extend(_WORD_SPLIT.split(space_separated_fragment))\n return [w for w in words if w]","function_tokens":["def","basic_tokenizer","(","sentence",")",":","words","=","[","]","for","space_separated_fragment","in","sentence",".","strip","(",")",".","split","(",")",":","words",".","extend","(","_WORD_SPLIT",".","split","(","space_separated_fragment",")",")","return","[","w","for","w","in","words","if","w","]"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/utils\/data_utils.py#L75-L80"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"utils\/data_utils.py","language":"python","identifier":"create_vocabulary","parameters":"(vocabulary_path, data_path, max_vocabulary_size, embedding_path,\n tokenizer=None, normalize_digits=True)","argument_list":"","return_statement":"","docstring":"Create vocabulary file (if it does not exist yet) from data file.\n\n Data file is assumed to contain one sentence per line. Each sentence is\n tokenized and digits are normalized (if normalize_digits is set).\n Vocabulary contains the most-frequent tokens up to max_vocabulary_size.\n We write it to vocabulary_path in a one-token-per-line format, so that later\n token in the first line gets id=0, second line gets id=1, and so on.\n\n Args:\n vocabulary_path: path where the vocabulary will be created.\n data_path: data file that will be used to create vocabulary.\n max_vocabulary_size: limit on the size of the created vocabulary.\n tokenizer: a function to use to tokenize each data sentence;\n if None, basic_tokenizer will be used.\n normalize_digits: Boolean; if true, all digits are replaced by 0s.","docstring_summary":"Create vocabulary file (if it does not exist yet) from data file.","docstring_tokens":["Create","vocabulary","file","(","if","it","does","not","exist","yet",")","from","data","file","."],"function":"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size, embedding_path,\n tokenizer=None, normalize_digits=True):\n \"\"\"Create vocabulary file (if it does not exist yet) from data file.\n\n Data file is assumed to contain one sentence per line. Each sentence is\n tokenized and digits are normalized (if normalize_digits is set).\n Vocabulary contains the most-frequent tokens up to max_vocabulary_size.\n We write it to vocabulary_path in a one-token-per-line format, so that later\n token in the first line gets id=0, second line gets id=1, and so on.\n\n Args:\n vocabulary_path: path where the vocabulary will be created.\n data_path: data file that will be used to create vocabulary.\n max_vocabulary_size: limit on the size of the created vocabulary.\n tokenizer: a function to use to tokenize each data sentence;\n if None, basic_tokenizer will be used.\n normalize_digits: Boolean; if true, all digits are replaced by 0s.\n \"\"\"\n if not gfile.Exists(vocabulary_path) or not gfile.Exists(embedding_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n print(\"Creating embedding file %s from data %s\" % (embedding_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = _DIGIT_RE.sub(\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n with gfile.GFile(embedding_path, mode=\"wb\") as embedding_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")\n embedding_file.write(w + 
\"\\n\")","function_tokens":["def","create_vocabulary","(","vocabulary_path",",","data_path",",","max_vocabulary_size",",","embedding_path",",","tokenizer","=","None",",","normalize_digits","=","True",")",":","if","not","gfile",".","Exists","(","vocabulary_path",")","or","not","gfile",".","Exists","(","embedding_path",")",":","print","(","\"Creating vocabulary %s from data %s\"","%","(","vocabulary_path",",","data_path",")",")","print","(","\"Creating embedding file %s from data %s\"","%","(","embedding_path",",","data_path",")",")","vocab","=","{","}","with","gfile",".","GFile","(","data_path",",","mode","=","\"r\"",")","as","f",":","counter","=","0","for","line","in","f",":","counter","+=","1","if","counter","%","100000","==","0",":","print","(","\" processing line %d\"","%","counter",")","tokens","=","tokenizer","(","line",")","if","tokenizer","else","basic_tokenizer","(","line",")","for","w","in","tokens",":","word","=","_DIGIT_RE",".","sub","(","\"0\"",",","w",")","if","normalize_digits","else","w","if","word","in","vocab",":","vocab","[","word","]","+=","1","else",":","vocab","[","word","]","=","1","vocab_list","=","_START_VOCAB","+","sorted","(","vocab",",","key","=","vocab",".","get",",","reverse","=","True",")","if","len","(","vocab_list",")",">","max_vocabulary_size",":","vocab_list","=","vocab_list","[",":","max_vocabulary_size","]","with","gfile",".","GFile","(","vocabulary_path",",","mode","=","\"wb\"",")","as","vocab_file",":","with","gfile",".","GFile","(","embedding_path",",","mode","=","\"wb\"",")","as","embedding_file",":","for","w","in","vocab_list",":","vocab_file",".","write","(","w","+","\"\\n\"",")","embedding_file",".","write","(","w","+","\"\\n\"",")"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/utils\/data_utils.py#L83-L125"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"utils\/data_utils.py","language":"python","identifier":"initialize_vocabulary","parameters":"(vocabulary_path)","argument_list":"","return_statement":"","docstring":"Initialize vocabulary from file.\n\n We assume the vocabulary is stored one-item-per-line, so a file:\n dog\n cat\n will result in a vocabulary {\"dog\": 0, \"cat\": 1}, and this function will\n also return the reversed-vocabulary [\"dog\", \"cat\"].\n\n Args:\n vocabulary_path: path to the file containing the vocabulary.\n\n Returns:\n a pair: the vocabulary (a dictionary mapping string to integers), and\n the reversed vocabulary (a list, which reverses the vocabulary mapping).\n\n Raises:\n ValueError: if the provided vocabulary_path does not exist.","docstring_summary":"Initialize vocabulary from file.","docstring_tokens":["Initialize","vocabulary","from","file","."],"function":"def initialize_vocabulary(vocabulary_path):\n \"\"\"Initialize vocabulary from file.\n\n We assume the vocabulary is stored one-item-per-line, so a file:\n dog\n cat\n will result in a vocabulary {\"dog\": 0, \"cat\": 1}, and this function will\n also return the reversed-vocabulary [\"dog\", \"cat\"].\n\n Args:\n vocabulary_path: path to the file containing the vocabulary.\n\n Returns:\n a pair: the vocabulary (a dictionary mapping string to integers), and\n the reversed vocabulary (a list, which reverses the vocabulary mapping).\n\n Raises:\n ValueError: if the provided vocabulary_path does not exist.\n \"\"\"\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"r\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)","function_tokens":["def","initialize_vocabulary","(","vocabulary_path",")",":","if","gfile",".","Exists","(","vocabulary_path",")",":","rev_vocab","=","[","]","with","gfile",".","GFile","(","vocabulary_path",",","mode","=","\"r\"",")","as","f",":","rev_vocab",".","extend","(","f",".","readlines","(",")",")","rev_vocab","=","[","line",".","strip","(",")","for","line","in","rev_vocab","]","vocab","=","dict","(","[","(","x",",","y",")","for","(","y",",","x",")","in","enumerate","(","rev_vocab",")","]",")","return","vocab",",","rev_vocab","else",":","raise","ValueError","(","\"Vocabulary file %s not found.\"",",","vocabulary_path",")"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/utils\/data_utils.py#L128-L155"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"utils\/data_utils.py","language":"python","identifier":"sentence_to_token_ids","parameters":"(sentence, vocabulary,\n tokenizer=None, normalize_digits=True)","argument_list":"","return_statement":"return [vocabulary.get(_DIGIT_RE.sub(\"0\", w), UNK_ID) for w in words]","docstring":"Convert a string to list of integers representing token-ids.\n\n For example, a sentence \"I have a dog\" may become tokenized into\n [\"I\", \"have\", \"a\", \"dog\"] and with vocabulary {\"I\": 1, \"have\": 2,\n \"a\": 4, \"dog\": 7\"} this function will return [1, 2, 4, 7].\n\n Args:\n sentence: the sentence in bytes format to convert to token-ids.\n vocabulary: a dictionary mapping tokens to integers.\n tokenizer: a function to use to tokenize each sentence;\n if None, basic_tokenizer will be used.\n normalize_digits: Boolean; if true, all digits are replaced by 0s.\n\n Returns:\n a list of integers, the token-ids for the sentence.","docstring_summary":"Convert a string to list of integers representing token-ids.","docstring_tokens":["Convert","a","string","to","list","of","integers","representing","token","-","ids","."],"function":"def sentence_to_token_ids(sentence, vocabulary,\n tokenizer=None, normalize_digits=True):\n \"\"\"Convert a string to list of integers representing token-ids.\n\n For example, a sentence \"I have a dog\" may become tokenized into\n [\"I\", \"have\", \"a\", \"dog\"] and with vocabulary {\"I\": 1, \"have\": 2,\n \"a\": 4, \"dog\": 7\"} this function will return [1, 2, 4, 7].\n\n Args:\n sentence: the sentence in bytes format to convert to token-ids.\n vocabulary: a dictionary mapping tokens to integers.\n tokenizer: a function to use to tokenize each sentence;\n if None, basic_tokenizer will be used.\n normalize_digits: Boolean; if true, all digits are replaced by 0s.\n\n Returns:\n a list of integers, the token-ids for the sentence.\n \"\"\"\n\n if tokenizer:\n words = tokenizer(sentence)\n else:\n words = basic_tokenizer(sentence)\n if not normalize_digits:\n return [vocabulary.get(w, UNK_ID) for w in words]\n # Normalize digits by 0 before looking words up in the vocabulary.\n return [vocabulary.get(_DIGIT_RE.sub(\"0\", w), UNK_ID) for w in words]","function_tokens":["def","sentence_to_token_ids","(","sentence",",","vocabulary",",","tokenizer","=","None",",","normalize_digits","=","True",")",":","if","tokenizer",":","words","=","tokenizer","(","sentence",")","else",":","words","=","basic_tokenizer","(","sentence",")","if","not","normalize_digits",":","return","[","vocabulary",".","get","(","w",",","UNK_ID",")","for","w","in","words","]","# Normalize digits by 0 before looking words up in the vocabulary.","return","[","vocabulary",".","get","(","_DIGIT_RE",".","sub","(","\"0\"",",","w",")",",","UNK_ID",")","for","w","in","words","]"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/utils\/data_utils.py#L158-L184"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"utils\/data_utils.py","language":"python","identifier":"data_to_token_ids","parameters":"(data_path, target_path, vocabulary_path,\n tokenizer=None, normalize_digits=True)","argument_list":"","return_statement":"","docstring":"Tokenize data file and turn into token-ids using given vocabulary file.\n\n This function loads data line-by-line from data_path, calls the above\n sentence_to_token_ids, and saves the result to target_path. See comment\n for sentence_to_token_ids on the details of token-ids format.\n\n Args:\n data_path: path to the data file in one-sentence-per-line format.\n target_path: path where the file with token-ids will be created.\n vocabulary_path: path to the vocabulary file.\n tokenizer: a function to use to tokenize each sentence;\n if None, basic_tokenizer will be used.\n normalize_digits: Boolean; if true, all digits are replaced by 0s.","docstring_summary":"Tokenize data file and turn into token-ids using given vocabulary file.","docstring_tokens":["Tokenize","data","file","and","turn","into","token","-","ids","using","given","vocabulary","file","."],"function":"def data_to_token_ids(data_path, target_path, vocabulary_path,\n tokenizer=None, normalize_digits=True):\n \"\"\"Tokenize data file and turn into token-ids using given vocabulary file.\n\n This function loads data line-by-line from data_path, calls the above\n sentence_to_token_ids, and saves the result to target_path. See comment\n for sentence_to_token_ids on the details of token-ids format.\n\n Args:\n data_path: path to the data file in one-sentence-per-line format.\n target_path: path where the file with token-ids will be created.\n vocabulary_path: path to the vocabulary file.\n tokenizer: a function to use to tokenize each sentence;\n if None, basic_tokenizer will be used.\n normalize_digits: Boolean; if true, all digits are replaced by 0s.\n \"\"\"\n if not gfile.Exists(target_path):\n print(\"Tokenizing data in %s\" % data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode=\"r\") as data_file:\n with gfile.GFile(target_path, mode=\"w\") as tokens_file:\n counter = 0\n for line in data_file:\n counter += 1\n if counter % 100000 == 0:\n print(\" tokenizing line %d\" % counter)\n token_ids = sentence_to_token_ids(line, vocab, tokenizer,\n normalize_digits)\n tokens_file.write(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")","function_tokens":["def","data_to_token_ids","(","data_path",",","target_path",",","vocabulary_path",",","tokenizer","=","None",",","normalize_digits","=","True",")",":","if","not","gfile",".","Exists","(","target_path",")",":","print","(","\"Tokenizing data in %s\"","%","data_path",")","vocab",",","_","=","initialize_vocabulary","(","vocabulary_path",")","with","gfile",".","GFile","(","data_path",",","mode","=","\"r\"",")","as","data_file",":","with","gfile",".","GFile","(","target_path",",","mode","=","\"w\"",")","as","tokens_file",":","counter","=","0","for","line","in","data_file",":","counter","+=","1","if","counter","%","100000","==","0",":","print","(","\" tokenizing line %d\"","%","counter",")","token_ids","=","sentence_to_token_ids","(","line",",","vocab",",","tokenizer",",","normalize_digits",")","tokens_file",".","write","(","\" 
\"",".","join","(","[","str","(","tok",")","for","tok","in","token_ids","]",")","+","\"\\n\"",")"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/utils\/data_utils.py#L187-L215"}
{"nwo":"Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow","sha":"457488444db6013ad1841c64093b348d5b8905b5","path":"utils\/data_utils.py","language":"python","identifier":"prepare_wmt_data","parameters":"(data_dir, en_vocabulary_size, fr_vocabulary_size,\n load_embeddings=False, tokenizer=None)","argument_list":"","return_statement":"return (en_train_ids_path, fr_train_ids_path,\n en_dev_ids_path, fr_dev_ids_path,\n en_vocab_path, fr_vocab_path)","docstring":"Get WMT data into data_dir, create vocabularies and tokenize data.\n\n Args:\n data_dir: directory in which the data sets will be stored.\n en_vocabulary_size: size of the English vocabulary to create and use.\n fr_vocabulary_size: size of the French vocabulary to create and use.\n tokenizer: a function to use to tokenize each data sentence;\n if None, basic_tokenizer will be used.\n\n Returns:\n A tuple of 6 elements:\n (1) path to the token-ids for English training data-set,\n (2) path to the token-ids for French training data-set,\n (3) path to the token-ids for English development data-set,\n (4) path to the token-ids for French development data-set,\n (5) path to the English vocabulary file,\n (6) path to the French vocabulary file.","docstring_summary":"Get WMT data into data_dir, create vocabularies and tokenize data.","docstring_tokens":["Get","WMT","data","into","data_dir","create","vocabularies","and","tokenize","data","."],"function":"def prepare_wmt_data(data_dir, en_vocabulary_size, fr_vocabulary_size,\n load_embeddings=False, tokenizer=None):\n \"\"\"Get WMT data into data_dir, create vocabularies and tokenize data.\n\n Args:\n data_dir: directory in which the data sets will be stored.\n en_vocabulary_size: size of the English vocabulary to create and use.\n fr_vocabulary_size: size of the French vocabulary to create and use.\n tokenizer: a function to use to tokenize each data sentence;\n if None, basic_tokenizer will be used.\n\n Returns:\n A tuple of 6 elements:\n (1) path to the token-ids for English training data-set,\n (2) path to the token-ids for French training data-set,\n (3) path to the token-ids for English development data-set,\n (4) path to the token-ids for French development data-set,\n (5) path to the English vocabulary file,\n (6) path to the French vocabulary file.\n \"\"\"\n # Get wmt data to the specified directory.\n train_path = os.path.join(data_dir, \"train.txt\")\n dev_path = os.path.join(data_dir, \"dev.txt\")\n\n # Create vocabularies of the appropriate sizes.\n fr_vocab_path = os.path.join(data_dir, \"vocab%d.out\" % fr_vocabulary_size)\n en_vocab_path = os.path.join(data_dir, \"vocab%d.in\" % en_vocabulary_size)\n create_vocabulary(fr_vocab_path, train_path + \".out\", fr_vocabulary_size,\n os.path.join(data_dir, \"dec_embedding{0}.tsv\".format(fr_vocabulary_size)),\n tokenizer)\n create_vocabulary(en_vocab_path, train_path + \".in\", en_vocabulary_size,\n os.path.join(data_dir, \"enc_embedding{0}.tsv\".format(en_vocabulary_size)),\n tokenizer)\n #if load_embeddings:\n # embed_utils.save_embeddings(fr_vocab_path, \"embed5000.txt\")\n # embed_utils.save_embeddings(en_vocab_path, \"embed5000.txt\")\n \n\n # Create token ids for the training data.\n fr_train_ids_path = train_path + (\".ids%d.out\" % fr_vocabulary_size)\n en_train_ids_path = train_path + (\".ids%d.in\" % en_vocabulary_size)\n data_to_token_ids(train_path + \".out\", fr_train_ids_path, fr_vocab_path, tokenizer)\n data_to_token_ids(train_path + \".in\", en_train_ids_path, en_vocab_path, tokenizer)\n\n # Create token ids for 
the development data.\n fr_dev_ids_path = dev_path + (\".ids%d.out\" % fr_vocabulary_size)\n en_dev_ids_path = dev_path + (\".ids%d.in\" % en_vocabulary_size)\n data_to_token_ids(dev_path + \".out\", fr_dev_ids_path, fr_vocab_path, tokenizer)\n data_to_token_ids(dev_path + \".in\", en_dev_ids_path, en_vocab_path, tokenizer)\n\n return (en_train_ids_path, fr_train_ids_path,\n en_dev_ids_path, fr_dev_ids_path,\n en_vocab_path, fr_vocab_path)","function_tokens":["def","prepare_wmt_data","(","data_dir",",","en_vocabulary_size",",","fr_vocabulary_size",",","load_embeddings","=","False",",","tokenizer","=","None",")",":","# Get wmt data to the specified directory.","train_path","=","os",".","path",".","join","(","data_dir",",","\"train.txt\"",")","dev_path","=","os",".","path",".","join","(","data_dir",",","\"dev.txt\"",")","# Create vocabularies of the appropriate sizes.","fr_vocab_path","=","os",".","path",".","join","(","data_dir",",","\"vocab%d.out\"","%","fr_vocabulary_size",")","en_vocab_path","=","os",".","path",".","join","(","data_dir",",","\"vocab%d.in\"","%","en_vocabulary_size",")","create_vocabulary","(","fr_vocab_path",",","train_path","+","\".out\"",",","fr_vocabulary_size",",","os",".","path",".","join","(","data_dir",",","\"dec_embedding{0}.tsv\"",".","format","(","fr_vocabulary_size",")",")",",","tokenizer",")","create_vocabulary","(","en_vocab_path",",","train_path","+","\".in\"",",","en_vocabulary_size",",","os",".","path",".","join","(","data_dir",",","\"enc_embedding{0}.tsv\"",".","format","(","en_vocabulary_size",")",")",",","tokenizer",")","#if load_embeddings:","# embed_utils.save_embeddings(fr_vocab_path, \"embed5000.txt\")","# embed_utils.save_embeddings(en_vocab_path, \"embed5000.txt\")","# Create token ids for the training data.","fr_train_ids_path","=","train_path","+","(","\".ids%d.out\"","%","fr_vocabulary_size",")","en_train_ids_path","=","train_path","+","(","\".ids%d.in\"","%","en_vocabulary_size",")","data_to_token_ids","(","train_path","+","\".out\"",",","fr_train_ids_path",",","fr_vocab_path",",","tokenizer",")","data_to_token_ids","(","train_path","+","\".in\"",",","en_train_ids_path",",","en_vocab_path",",","tokenizer",")","# Create token ids for the development data.","fr_dev_ids_path","=","dev_path","+","(","\".ids%d.out\"","%","fr_vocabulary_size",")","en_dev_ids_path","=","dev_path","+","(","\".ids%d.in\"","%","en_vocabulary_size",")","data_to_token_ids","(","dev_path","+","\".out\"",",","fr_dev_ids_path",",","fr_vocab_path",",","tokenizer",")","data_to_token_ids","(","dev_path","+","\".in\"",",","en_dev_ids_path",",","en_vocab_path",",","tokenizer",")","return","(","en_train_ids_path",",","fr_train_ids_path",",","en_dev_ids_path",",","fr_dev_ids_path",",","en_vocab_path",",","fr_vocab_path",")"],"url":"https:\/\/github.com\/Chung-I\/Variational-Recurrent-Autoencoder-Tensorflow\/blob\/457488444db6013ad1841c64093b348d5b8905b5\/utils\/data_utils.py#L218-L270"}
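prepare_wmt_data ties the utilities together: it expects train.txt.in / train.txt.out and dev.txt.in / dev.txt.out under data_dir, builds the two vocabularies and embedding token files, and writes .ids files for each split. The snippet below only illustrates the file naming it uses (vocabulary sizes are placeholders):

import os

data_dir, en_size, fr_size = "data", 40000, 40000
train_path = os.path.join(data_dir, "train.txt")
dev_path = os.path.join(data_dir, "dev.txt")

print(train_path + ".in", train_path + ".out")              # source / target training text
print(dev_path + ".in", dev_path + ".out")                  # source / target dev text
print(train_path + ".ids%d.in" % en_size,                   # token-id files it creates
      train_path + ".ids%d.out" % fr_size)
print(os.path.join(data_dir, "vocab%d.in" % en_size),       # vocabulary files
      os.path.join(data_dir, "vocab%d.out" % fr_size))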