{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Import"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:35:49.097390Z",
     "start_time": "2019-05-15T12:35:46.251829Z"
    }
   },
   "outputs": [],
   "source": [
    "import os\n",
    "import pickle\n",
    "import copy\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n",
    "from tensorflow.python.layers import core as core_layers\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "from tensorflow.python.ops import array_ops\n",
    "import time\n",
    "import jieba\n",
    "from Util import mybleu\n",
    "from Util import myResidualCell\n",
    "import random\n",
    "import pickle as cPickle\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "def idx2str(s):\n",
    "    # Map a sequence of token ids back to a space-joined string (global id2w).\n",
    "    return ' '.join(id2w[token] for token in s)\n",
    "\n",
    "def str2idx(idx):\n",
    "    # Split a sentence on whitespace and map each token to its id (global w2id).\n",
    "    tokens = idx.strip().split()\n",
    "    return [w2id[token] for token in tokens]\n",
    "\n",
    "\n",
    "def pad(x, pid, move_go=False):\n",
    "    # Truncate every sequence to at most 30 tokens, then right-pad each one\n",
    "    # with `pid` up to the longest (post-truncation) length in the batch.\n",
    "    # When move_go is True the leading token (<GO>) is dropped first and the\n",
    "    # reported lengths are len(seq) - 1.\n",
    "    truncated = [seq[:30] for seq in x]\n",
    "    if move_go:\n",
    "        length_list = [len(seq) - 1 for seq in truncated]\n",
    "    else:\n",
    "        length_list = [len(seq) for seq in truncated]\n",
    "    batch_max = max(length_list)\n",
    "    pad_x = []\n",
    "    for seq in truncated:\n",
    "        body = seq[1:] if move_go else seq\n",
    "        pad_x.append(body + [pid] * (batch_max - len(body)))\n",
    "    return pad_x, length_list\n",
    "\n",
    "def pad_maxlength(x, pid, move_go=False):\n",
    "    # Pad (or truncate) every sequence to exactly max_length = 30 tokens and\n",
    "    # return the clamped true lengths.  When move_go is True the leading <GO>\n",
    "    # token is dropped first.\n",
    "    # Bug fix: the original move_go branch neither truncated k[1:] to\n",
    "    # max_length nor clamped length_list, so long inputs produced rows longer\n",
    "    # than 30 (inconsistent with the non-move_go branch).\n",
    "    max_length = 30\n",
    "    if move_go:\n",
    "        length_list = [min(len(k) - 1, max_length) for k in x]\n",
    "    else:\n",
    "        length_list = [min(len(k), max_length) for k in x]\n",
    "    pad_x = []\n",
    "    for k in x:\n",
    "        body = k[1:max_length + 1] if move_go else k[:max_length]\n",
    "        pad_x.append(body + [pid] * (max_length - len(body)))\n",
    "    return pad_x, length_list\n",
    "\n",
    "# Show INFO-level TensorFlow logs; let the GPU allocator grow on demand\n",
    "# instead of grabbing all device memory up front.\n",
    "tf.logging.set_verbosity(tf.logging.INFO)\n",
    "sess_conf = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:35:49.423082Z",
     "start_time": "2019-05-15T12:35:49.100068Z"
    }
   },
   "outputs": [],
   "source": [
    "import nltk\n",
    "def word_overlap_edit(s1, s2):\n",
    "    # Jaccard word overlap and normalized edit-distance similarity between two\n",
    "    # whitespace-tokenized sentences; both scores lie in [0, 1].\n",
    "    t1 = set(s1.split())\n",
    "    t2 = set(s2.split())\n",
    "    union = t1 | t2\n",
    "    # Guard against empty input (original raised ZeroDivisionError):\n",
    "    # two empty sentences are treated as identical.\n",
    "    word_overlap = float(len(t1 & t2)) / len(union) if union else 1.0\n",
    "    max_len = max(len(s1.split()), len(s2.split()))\n",
    "    if max_len == 0:\n",
    "        edit_distance = 1.0\n",
    "    else:\n",
    "        edit_distance = 1 - float(nltk.edit_distance(s1.split(), s2.split())) / max_len\n",
    "    return word_overlap, edit_distance"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-03-25T09:14:17.120168Z",
     "start_time": "2019-03-25T09:14:17.115831Z"
    }
   },
   "source": [
    "## Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-04-26T08:01:32.631416Z",
     "start_time": "2019-04-26T08:01:32.530295Z"
    }
   },
   "outputs": [],
   "source": [
    "class VAE:\n",
    "    def __init__(self, dp, rnn_size, n_layers, Lambda, gamma, num_classes, latent_dim, encoder_embedding_dim, decoder_embedding_dim, max_infer_length,\n",
    "                 sess, lr=0.001, grad_clip=5.0, beam_width=10, force_teaching_ratio=1.0, beam_penalty=1.0,\n",
    "                 residual=False, output_keep_prob=0.5, input_keep_prob=0.9, bow_size=400, predictor_size=None,\n",
    "                 is_inference=False, latent_weight=0.4, beta_decay_period=10, beta_decay_offset=5, cell_type='lstm', reverse=False,\n",
    "                 decay_scheme='luong234'):\n",
    "        # Text VAE with a latent-space classifier and a bag-of-words auxiliary loss.\n",
    "        # dp: data provider exposing X_w2id / Y_w2id / Y_id2w vocabularies.\n",
    "        # Lambda mixes the classifier loss with the VAE losses; latent_weight\n",
    "        # scales the KL term (further scaled at run time by the fed beta B).\n",
    "        \n",
    "        self.rnn_size = rnn_size\n",
    "        self.n_layers = n_layers\n",
    "        self.bow_size = bow_size\n",
    "        # Default classifier hidden width: 4x the RNN size.\n",
    "        if not predictor_size:\n",
    "            self.predictor_size = self.rnn_size * 4\n",
    "        else:\n",
    "            self.predictor_size = predictor_size\n",
    "        self.Lambda = Lambda\n",
    "        self.grad_clip = grad_clip\n",
    "        self.dp = dp\n",
    "        self.latent_weight = latent_weight\n",
    "        self.beta_decay_period = beta_decay_period\n",
    "        self.beta_decay_offset = beta_decay_offset\n",
    "        self.latent_dim = latent_dim\n",
    "        self.gamma = gamma\n",
    "        self.step = 0\n",
    "        self.num_classes = num_classes\n",
    "        self.encoder_embedding_dim = encoder_embedding_dim\n",
    "        self.decoder_embedding_dim = decoder_embedding_dim\n",
    "        self.beam_width = beam_width\n",
    "        self.is_inference = is_inference\n",
    "        self.beam_penalty = beam_penalty\n",
    "        self.max_infer_length = max_infer_length\n",
    "        self.residual = residual\n",
    "        self.decay_scheme = decay_scheme\n",
    "        # Residual connections require the embedding dims to match the cell size.\n",
    "        if self.residual:\n",
    "            assert encoder_embedding_dim == rnn_size\n",
    "            assert decoder_embedding_dim == rnn_size\n",
    "        self.reverse = reverse\n",
    "        self.cell_type = cell_type\n",
    "        self.force_teaching_ratio = force_teaching_ratio\n",
    "        self._output_keep_prob = output_keep_prob\n",
    "        self._input_keep_prob = input_keep_prob\n",
    "        self.sess = sess\n",
    "        self.lr=lr\n",
    "        self.build_graph()\n",
    "        self.sess.run(tf.global_variables_initializer())\n",
    "        # NOTE(review): Saver tracks only trainable variables, so the global\n",
    "        # step counters are not checkpointed.  setup_summary is defined\n",
    "        # outside this view — confirm it exists on the class.\n",
    "        self.saver = tf.train.Saver(tf.trainable_variables(), max_to_keep = 35)\n",
    "        self.summary_placeholders, self.update_ops, self.summary_op = self.setup_summary()\n",
    "        \n",
    "    # end constructor\n",
    "\n",
    "    def build_graph(self):\n",
    "        # Assemble the full graph.  Order matters: the encoder/stochastic\n",
    "        # layers must exist before the decoders, and the three decoders share\n",
    "        # one 'decode' variable scope (training creates, inference reuses).\n",
    "        self.register_symbols()\n",
    "        self.add_input_layer()\n",
    "        self.add_encoder_layer()\n",
    "        self.add_stochastic_layer()\n",
    "        self.add_decoder_hidden()\n",
    "        with tf.variable_scope('decode'):\n",
    "            self.add_decoder_for_training()\n",
    "        with tf.variable_scope('decode', reuse=True):\n",
    "            self.add_decoder_for_inference()\n",
    "        with tf.variable_scope('decode', reuse=True):\n",
    "            self.add_decoder_for_prefix_inference()\n",
    "        with tf.variable_scope('predictor_layer'):\n",
    "            self.add_classifer()\n",
    "        self.add_backward_path()\n",
    "    # end method\n",
    "    \n",
    "    def _item_or_tuple(self, seq):\n",
    "        \"\"\"Returns `seq` as tuple or the singular element.\n",
    "        Which is returned is determined by how the AttentionMechanism(s) were passed\n",
    "        to the constructor.\n",
    "        Args:\n",
    "          seq: A non-empty sequence of items or generator.\n",
    "        Returns:\n",
    "           Either the values in the sequence as a tuple if AttentionMechanism(s)\n",
    "           were passed to the constructor as a sequence or the singular element.\n",
    "        \"\"\"\n",
    "        # NOTE(review): self._is_multi is never assigned anywhere in this\n",
    "        # class as visible here, so calling this would raise AttributeError.\n",
    "        # Appears copied from tf's AttentionWrapper — confirm it is unused.\n",
    "        t = tuple(seq)\n",
    "        if self._is_multi:\n",
    "            return t\n",
    "        else:\n",
    "            return t[0]\n",
    "        \n",
    "        \n",
    "    def add_input_layer(self):\n",
    "        # Placeholders and global step counters shared by the whole graph.\n",
    "        self.X = tf.placeholder(tf.int32, [None, None], name=\"X\")\n",
    "        self.Y = tf.placeholder(tf.int32, [None, None], name=\"Y\")\n",
    "        self.X_seq_len = tf.placeholder(tf.int32, [None], name=\"X_seq_len\")\n",
    "        self.Y_seq_len = tf.placeholder(tf.int32, [None], name=\"Y_seq_len\")\n",
    "        # C holds one-hot class labels of shape (batch, num_classes).\n",
    "        self.C = tf.placeholder(tf.int32, [None, self.num_classes], name='C')\n",
    "        self.input_keep_prob = tf.placeholder(tf.float32,name=\"input_keep_prob\")\n",
    "        self.output_keep_prob = tf.placeholder(tf.float32,name=\"output_keep_prob\")\n",
    "        self.batch_size = tf.shape(self.X)[0]\n",
    "        # B is the deterministic warm-up beta multiplying the KL term.\n",
    "        self.B = tf.placeholder(tf.float32, name='Beta_deterministic_warmup')\n",
    "        self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n",
    "        self.predictor_global_step = tf.Variable(0, name=\"predictor_global_step\", trainable=False)\n",
    "        self.bow_global_step = tf.Variable(0, name=\"bow_global_step\", trainable=False)\n",
    "    # end method\n",
    "\n",
    "    def single_cell(self, reuse=False):\n",
    "        # Build one RNN cell: layer-norm LSTM or GRU, wrapped with dropout\n",
    "        # and (optionally) a residual connection.\n",
    "        if self.cell_type == 'lstm':\n",
    "            cell = tf.contrib.rnn.LayerNormBasicLSTMCell(self.rnn_size, reuse=reuse)\n",
    "        else:\n",
    "            # NOTE(review): `reuse` is not forwarded to the GRU cell.\n",
    "            cell = tf.contrib.rnn.GRUBlockCell(self.rnn_size)\n",
    "        # Bug fix: DropoutWrapper's positional signature is\n",
    "        # (cell, input_keep_prob, output_keep_prob); the original passed\n",
    "        # output/input keep probabilities swapped.  Use keywords to be explicit.\n",
    "        cell = tf.contrib.rnn.DropoutWrapper(cell,\n",
    "                                             input_keep_prob=self.input_keep_prob,\n",
    "                                             output_keep_prob=self.output_keep_prob)\n",
    "        if self.residual:\n",
    "            cell = myResidualCell.ResidualWrapper(cell)\n",
    "        return cell\n",
    "    \n",
    "    \n",
    "    def add_encoder_layer(self):\n",
    "        # Bidirectional multi-layer RNN over the input tokens; the final\n",
    "        # fw/bw states are concatenated into self.encoder_out.\n",
    "        encoder_embedding = tf.get_variable('encoder_embedding', [len(self.dp.X_w2id), self.encoder_embedding_dim],\n",
    "                                             tf.float32, tf.random_uniform_initializer(-1.0, 1.0))\n",
    "        \n",
    "        self.encoder_inputs = tf.nn.embedding_lookup(encoder_embedding, self.X)\n",
    "        bi_encoder_output, bi_encoder_state = tf.nn.bidirectional_dynamic_rnn(\n",
    "            cell_fw = tf.contrib.rnn.MultiRNNCell([self.single_cell() for _ in range(self.n_layers)]), \n",
    "            cell_bw = tf.contrib.rnn.MultiRNNCell([self.single_cell() for _ in range(self.n_layers)]),\n",
    "            inputs = self.encoder_inputs,\n",
    "            sequence_length = self.X_seq_len,\n",
    "            dtype = tf.float32,\n",
    "            scope = 'bidirectional_rnn')\n",
    "        \n",
    "        # For LSTM, state[ -1 ][1] is the hidden state h of the top layer;\n",
    "        # GRU states are plain tensors.\n",
    "        if self.cell_type == 'lstm':\n",
    "            self.encoder_out = tf.concat([bi_encoder_state[0][-1][1],bi_encoder_state[1][-1][1]], -1)\n",
    "        else:\n",
    "            self.encoder_out = tf.concat([bi_encoder_state[0][-1],bi_encoder_state[1][-1]], -1)\n",
    "        \n",
    "        #print('encoder_out', self.encoder_out)\n",
    "        \n",
    "    def add_stochastic_layer(self):\n",
    "        #self.z_mu = tf.layers.dense(self.encoder_out, self.latent_dim)\n",
    "        #self.z_lgs2 = tf.layers.dense(self.encoder_out, self.latent_dim)\n",
    "        #noise = tf.random_normal(tf.shape(self.z_lgs2))\n",
    "        # Gaussian posterior q(z|x): z_mu and log-variance z_lgs2, with the\n",
    "        # reparameterization trick for sampling during training.\n",
    "        self.z_mu = tf.layers.dense(self.encoder_out, self.latent_dim)\n",
    "        self.z_lgs2 = tf.layers.dense(self.encoder_out, self.latent_dim)\n",
    "        noise = tf.random_normal(tf.shape(self.z_lgs2))\n",
    "        # At inference time use the deterministic mean instead of a sample.\n",
    "        if self.is_inference:\n",
    "            self.z = self.z_mu\n",
    "        else:\n",
    "            self.z = self.z_mu + tf.exp(0.5 * self.z_lgs2) * noise\n",
    "        # Bag-of-words head: predicts the target vocabulary directly from z\n",
    "        # (auxiliary loss that combats KL collapse).\n",
    "        with tf.variable_scope('bow_layer'):\n",
    "            self.bow_fc1 = tf.layers.dense(self.z, self.bow_size, activation=tf.tanh, name=\"bow_fc1\")\n",
    "            self.bow_fc1 = tf.nn.dropout(self.bow_fc1, self.output_keep_prob)\n",
    "            #print('bow_fc1', self.bow_fc1)\n",
    "            self.bow_logits = tf.layers.dense(self.bow_fc1, len(self.dp.Y_w2id), activation=None, name=\"bow_project\")\n",
    "            #print('bow_logits', self.bow_logits)\n",
    "    \n",
    "    def add_classifer(self):\n",
    "        # Two-layer MLP classifier on top of the latent vector z.\n",
    "        #print('self.encoder_out', self.encoder_out)\n",
    "        h_hat = self.z\n",
    "        #self.z = self.encoder_out\n",
    "        fc = tf.layers.dense(h_hat, self.predictor_size, name='fc1')\n",
    "        fc = tf.contrib.layers.dropout(fc, self.output_keep_prob)\n",
    "        fc = tf.nn.relu(fc)\n",
    "        self.fc = fc\n",
    "        self.logits = tf.layers.dense(fc, self.num_classes, name='fc2')\n",
    "        #self.y_pred_cls = tf.argmax(tf.nn.softmax(self.logits), 1)  # predicted class\n",
    "        self.ypred_for_auc = tf.nn.softmax(self.logits)\n",
    "        self.predictions = tf.argmax(self.logits, 1, name=\"predictions\")\n",
    "        \n",
    "        # Accuracy against the one-hot labels in self.C.\n",
    "        correct_pred = tf.equal(self.predictions, tf.argmax(self.C, 1))\n",
    "        #print('correct_pred', correct_pred)\n",
    "        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
    "        \n",
    "    def add_decoder_hidden(self):\n",
    "        # Project z into an initial state for every decoder layer (2*n_layers\n",
    "        # because the decoder mirrors the bidirectional encoder's depth).\n",
    "        hidden_state_list = []\n",
    "        for i in range(self.n_layers * 2):\n",
    "            if self.cell_type == 'gru':\n",
    "                hidden_state_list.append(tf.layers.dense(self.z, self.rnn_size))\n",
    "            else:\n",
    "                hidden_state_list.append(tf.contrib.rnn.LSTMStateTuple(tf.layers.dense(self.z, self.rnn_size), tf.layers.dense(self.z, self.rnn_size))) \n",
    "        self.decoder_init_state = tuple(hidden_state_list)\n",
    "        #print('self.decoder_init_state', self.decoder_init_state)\n",
    "        \n",
    "    def processed_decoder_input(self):\n",
    "        # Teacher-forcing input: drop the last target token and prepend <GO>.\n",
    "        main = tf.strided_slice(self.Y, [0, 0], [self.batch_size, -1], [1, 1]) # remove last char\n",
    "        decoder_input = tf.concat([tf.fill([self.batch_size, 1], self._y_go), main], 1)\n",
    "        return decoder_input\n",
    "\n",
    "    def add_decoder_for_training(self):\n",
    "        # Teacher-forced decoder.  z is tiled across time and concatenated to\n",
    "        # every input embedding so the latent code conditions each step.\n",
    "        self.decoder_cell = tf.contrib.rnn.MultiRNNCell([self.single_cell() for _ in range(2 * self.n_layers)])\n",
    "        decoder_embedding = tf.get_variable('decoder_embedding', [len(self.dp.Y_w2id), self.decoder_embedding_dim],\n",
    "                                             tf.float32, tf.random_uniform_initializer(-1.0, 1.0))\n",
    "        emb = tf.nn.embedding_lookup(decoder_embedding, self.processed_decoder_input())\n",
    "        inputs = tf.expand_dims(self.z, 1)\n",
    "        inputs = tf.tile(inputs, [1, tf.shape(emb)[1], 1])\n",
    "        inputs = tf.concat([emb, inputs],2) \n",
    "        training_helper = tf.contrib.seq2seq.TrainingHelper(\n",
    "            inputs = inputs,\n",
    "            sequence_length = self.Y_seq_len,\n",
    "            time_major = False)\n",
    "        training_decoder = tf.contrib.seq2seq.BasicDecoder(\n",
    "            cell = self.decoder_cell,\n",
    "            helper = training_helper,\n",
    "            initial_state = self.decoder_init_state, #self.decoder_cell.zero_state(self.batch_size, tf.float32),\n",
    "            output_layer = core_layers.Dense(len(self.dp.Y_w2id)))\n",
    "        training_decoder_output, training_final_state, _ = tf.contrib.seq2seq.dynamic_decode(\n",
    "            decoder = training_decoder,\n",
    "            impute_finished = True,\n",
    "            maximum_iterations = tf.reduce_max(self.Y_seq_len))\n",
    "        self.training_logits = training_decoder_output.rnn_output\n",
    "        # Final state is reused as the starting state for prefix decoding.\n",
    "        self.init_prefix_state = training_final_state\n",
    "\n",
    "    def add_decoder_for_inference(self):   \n",
    "        # Beam-search decoder reusing the training weights ('decode' scope is\n",
    "        # entered with reuse=True by build_graph).\n",
    "        decoder_embedding = tf.get_variable('decoder_embedding')\n",
    "        # Custom embedding fn: concatenate z onto each looked-up embedding so\n",
    "        # beam search sees the same conditioned inputs as training.  The\n",
    "        # rank check distinguishes the (batch, beam) id matrix from the 1-D\n",
    "        # start-token vector.\n",
    "        self.beam_f = (lambda ids: tf.concat([tf.nn.embedding_lookup(decoder_embedding, ids), \n",
    "                                    tf.tile(tf.expand_dims(self.z, 1), \n",
    "                                            [1,int(tf.nn.embedding_lookup(decoder_embedding, ids).get_shape()[1]), 1]) if len(ids.get_shape()) !=1 \n",
    "                                             else self.z], -1))\n",
    "\n",
    "        predicting_decoder = tf.contrib.seq2seq.BeamSearchDecoder(\n",
    "            cell = self.decoder_cell,\n",
    "            embedding = self.beam_f, \n",
    "            start_tokens = tf.tile(tf.constant([self._y_go], dtype=tf.int32), [self.batch_size]),\n",
    "            end_token = self._y_eos,\n",
    "            initial_state = tf.contrib.seq2seq.tile_batch(self.decoder_init_state, self.beam_width),#self.decoder_cell.zero_state(self.batch_size * self.beam_width, tf.float32),\n",
    "            beam_width = self.beam_width,\n",
    "            output_layer = core_layers.Dense(len(self.dp.Y_w2id), _reuse=True),\n",
    "            length_penalty_weight = self.beam_penalty)\n",
    "        predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n",
    "            decoder = predicting_decoder,\n",
    "            impute_finished = False,\n",
    "            maximum_iterations = self.max_infer_length)\n",
    "        self.predicting_ids = predicting_decoder_output.predicted_ids\n",
    "        self.score = predicting_decoder_output.beam_search_decoder_output.scores\n",
    "        \n",
    "    def add_decoder_for_prefix_inference(self):   \n",
    "        # Beam search that continues from the training decoder's final state,\n",
    "        # i.e. completes a sequence after a given prefix.\n",
    "        predicting_decoder = tf.contrib.seq2seq.BeamSearchDecoder(\n",
    "            cell = self.decoder_cell,\n",
    "            embedding = self.beam_f,\n",
    "            start_tokens = tf.tile(tf.constant([self._y_go], dtype=tf.int32), [self.batch_size]),\n",
    "            end_token = self._y_eos,\n",
    "            initial_state = tf.contrib.seq2seq.tile_batch(self.init_prefix_state, self.beam_width),\n",
    "            beam_width = self.beam_width,\n",
    "            output_layer = core_layers.Dense(len(self.dp.Y_w2id), _reuse=True),\n",
    "            length_penalty_weight = self.beam_penalty)\n",
    "        \n",
    "        # Last token of the prefix, fed as the first input of the beam search\n",
    "        # by overwriting the decoder's private _start_inputs.\n",
    "        self.prefix_go = tf.placeholder(tf.int32, [None])\n",
    "        prefix_go_beam = tf.tile(tf.expand_dims(self.prefix_go, 1), [1, self.beam_width])\n",
    "        prefix_emb = self.beam_f(prefix_go_beam)\n",
    "        predicting_decoder._start_inputs = prefix_emb\n",
    "        predicting_prefix_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n",
    "            decoder = predicting_decoder,\n",
    "            impute_finished = False,\n",
    "            maximum_iterations = self.max_infer_length)\n",
    "        self.predicting_prefix_ids = predicting_prefix_decoder_output.predicted_ids\n",
    "        self.prefix_score = predicting_prefix_decoder_output.beam_search_decoder_output.scores\n",
    "    \n",
    "    \n",
    "    def add_backward_path(self):\n",
    "        # Losses and optimizers: classifier loss (c_loss), sequence\n",
    "        # reconstruction loss (r_loss), KL divergence, and bag-of-words loss.\n",
    "        #print(self.logits, self.C)\n",
    "        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.C)\n",
    "        self.c_loss = tf.reduce_mean(cross_entropy)\n",
    "        \n",
    "        masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)\n",
    "        self.r_loss = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,\n",
    "                                                     targets = self.Y,\n",
    "                                                     weights = masks)\n",
    "        self.all_reconstruct_loss = tf.reduce_sum(tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,\n",
    "                                                     targets = self.Y,\n",
    "                                                     weights = masks,\n",
    "                                                     average_across_timesteps=False))\n",
    "        # Closed-form KL(q(z|x) || N(0, I)) for a diagonal Gaussian.\n",
    "        self.kl_loss = tf.reduce_mean(-0.5 * tf.reduce_sum(1 + self.z_lgs2 - tf.square(self.z_mu) - tf.exp(self.z_lgs2), 1))\n",
    "        \n",
    "        # BOW loss: the single bow_logits vector is tiled over every target\n",
    "        # position and scored against the target tokens.\n",
    "        max_out_len = array_ops.shape(self.Y)[1]\n",
    "        self.tile_bow_logits = tf.tile(tf.expand_dims(self.bow_logits, 1), [1, max_out_len, 1])\n",
    "        labels = self.Y\n",
    "        label_mask = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)\n",
    "        bow_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.tile_bow_logits, labels=labels) * label_mask\n",
    "        bow_loss = tf.reduce_sum(bow_loss, reduction_indices=1)\n",
    "        self.avg_bow_loss = tf.reduce_mean(bow_loss)\n",
    "        \n",
    "        # Total loss: Lambda mixes classifier vs. VAE terms; B * latent_weight\n",
    "        # anneals the KL contribution (warm-up).\n",
    "        self.loss = self.Lambda * self.c_loss + (1 - self.Lambda) * (self.r_loss + self.B * self.latent_weight * self.kl_loss + self.avg_bow_loss)\n",
    "        params = tf.trainable_variables()\n",
    "        gradients = tf.gradients(self.loss, params)\n",
    "        clipped_gradients, _ = tf.clip_by_global_norm(gradients, self.grad_clip)\n",
    "        self.learning_rate = tf.constant(self.lr)\n",
    "        # NOTE(review): get_learning_rate_decay is not defined in the visible\n",
    "        # part of this class — confirm it exists (e.g. in a subclass/mixin).\n",
    "        self.learning_rate = self.get_learning_rate_decay(self.decay_scheme)  # decay\n",
    "        self.train_op = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(zip(clipped_gradients, params), global_step=self.global_step)\n",
    "        \n",
    "        # Gradients of the classifier loss w.r.t. the latent code, used by the\n",
    "        # zTo*/xTo* helper methods for latent-space manipulation.\n",
    "        self.Dgrad = tf.gradients(self.c_loss, [self.z])\n",
    "        self.Dgradmu = tf.gradients(self.c_loss, [self.z_mu])\n",
    "        \n",
    "        #---- predictor -----#\n",
    "        # Separate optimizer that updates only the classifier head.\n",
    "        params_predictor = [v for v in tf.trainable_variables() if 'predictor_layer' in v.name]\n",
    "        print('params_predictor', params_predictor)\n",
    "        gradients_predictor = tf.gradients(self.c_loss, params_predictor)\n",
    "        #clipped_gradients_predictor, _ = tf.clip_by_global_norm(gradients_predictor, self.grad_clip)\n",
    "        self.predictor_op = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(zip(gradients_predictor, params_predictor), global_step=self.predictor_global_step)\n",
    "\n",
    "        #---- bow---------#\n",
    "        # Separate optimizer that updates only the bag-of-words head.\n",
    "        params_bow = [v for v in tf.trainable_variables() if 'bow_layer' in v.name]\n",
    "        print('params_bow', params_bow)\n",
    "        gradients_bow = tf.gradients(self.avg_bow_loss, params_bow)\n",
    "        #clipped_gradients_bow, _ = tf.clip_by_global_norm(gradients_bow, self.grad_clip)\n",
    "        self.bow_op = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(zip(gradients_bow, params_bow), global_step=self.bow_global_step)\n",
    "       \n",
    "        self.Bowgradmu = tf.gradients(self.avg_bow_loss, [self.z_mu])\n",
    "        \n",
    "        \n",
    "    def register_symbols(self):\n",
    "        # Cache the ids of the four special tokens for both vocabularies.\n",
    "        self._x_go = self.dp.X_w2id['<GO>']\n",
    "        self._x_eos = self.dp.X_w2id['<EOS>']\n",
    "        self._x_pad = self.dp.X_w2id['<PAD>']\n",
    "        self._x_unk = self.dp.X_w2id['<UNK>']\n",
    "        \n",
    "        self._y_go = self.dp.Y_w2id['<GO>']\n",
    "        self._y_eos = self.dp.Y_w2id['<EOS>']\n",
    "        self._y_pad = self.dp.Y_w2id['<PAD>']\n",
    "        self._y_unk = self.dp.Y_w2id['<UNK>']\n",
    "        \n",
    "        \n",
    "    def xToz(self, input_word):\n",
    "        # Encode a whitespace-tokenized sentence to a sampled latent vector z.\n",
    "        #print(input_word)\n",
    "        input_word = input_word.split()\n",
    "        #print(input_word)\n",
    "        input_indices = [self.dp.X_w2id.get(char, self._x_unk) for char in input_word]\n",
    "        #print(input_indices)\n",
    "        z = self.sess.run(self.z, {self.X: [input_indices], self.X_seq_len: [len(input_indices)], self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        return z\n",
    "    \n",
    "    def xTozmu(self, input_word):\n",
    "        # Same as xToz but returns the deterministic posterior mean z_mu.\n",
    "        #print(input_word)\n",
    "        input_word = input_word.split()\n",
    "        #print(input_word)\n",
    "        input_indices = [self.dp.X_w2id.get(char, self._x_unk) for char in input_word]\n",
    "        #print(input_indices)\n",
    "        z = self.sess.run(self.z_mu, {self.X: [input_indices], self.X_seq_len: [len(input_indices)], self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        return z\n",
    "    \n",
    "    \n",
    "    def xToz_grad(self, input_word, c):\n",
    "        # Encode a sentence and return (z, d c_loss / d z) for class index c\n",
    "        # (c is converted to a one-hot label vector).\n",
    "        c_ = [0 for _ in range(self.num_classes)]\n",
    "        c_[c] = 1\n",
    "        #print(input_word)\n",
    "        input_word = input_word.split()\n",
    "        #print(input_word)\n",
    "        input_indices = [self.dp.X_w2id.get(char, self._x_unk) for char in input_word]\n",
    "        #print(input_indices)\n",
    "        grad, z = self.sess.run([self.Dgrad, self.z], {self.C: [c_], self.X: [input_indices], self.X_seq_len: [len(input_indices)], self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        return z, grad\n",
    "    \n",
    "    def xTozmu_grad(self, input_word, c):\n",
    "        # Like xToz_grad but fetches z_mu instead of the sampled z.\n",
    "        # NOTE(review): the gradient fetched here is still Dgrad (w.r.t. z),\n",
    "        # not Dgradmu — confirm this mismatch is intentional.\n",
    "        c_ = [0 for _ in range(self.num_classes)]\n",
    "        c_[c] = 1\n",
    "        #print(input_word)\n",
    "        input_word = input_word.split()\n",
    "        #print(input_word)\n",
    "        input_indices = [self.dp.X_w2id.get(char, self._x_unk) for char in input_word]\n",
    "        #print(input_indices)\n",
    "        grad, z = self.sess.run([self.Dgrad, self.z_mu], {self.C: [c_], self.X: [input_indices], self.X_seq_len: [len(input_indices)], self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        return z, grad\n",
    "    \n",
    "    def xTozmu_gradmu(self, input_word, c):\n",
    "        # Returns (z_mu, d c_loss / d z_mu) for class index c.\n",
    "        c_ = [0 for _ in range(self.num_classes)]\n",
    "        c_[c] = 1\n",
    "        #print(input_word)\n",
    "        input_word = input_word.split()\n",
    "        #print(input_word)\n",
    "        input_indices = [self.dp.X_w2id.get(char, self._x_unk) for char in input_word]\n",
    "        #print(input_indices)\n",
    "        grad, z = self.sess.run([self.Dgradmu, self.z_mu], {self.C: [c_], self.X: [input_indices], self.X_seq_len: [len(input_indices)], self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        return z, grad\n",
    "    \n",
    "    \n",
    "    def zmu_gradCmu(self, z_mu, c):\n",
    "        # Classifier-loss gradient w.r.t. z_mu, feeding z directly\n",
    "        # (bypasses the encoder entirely).\n",
    "        c_ = [0 for _ in range(self.num_classes)]\n",
    "        c_[c] = 1\n",
    "        grad = self.sess.run(self.Dgradmu, {self.C: [c_], self.z: z_mu, self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        return grad\n",
    "    \n",
    "    def zmu_gradBowCmu(self, z_mu, c, content):\n",
    "        # Both the classifier gradient (w.r.t. z) and the BOW-loss gradient\n",
    "        # (w.r.t. z_mu) for a given latent code, class, and target content.\n",
    "        c_ = [0 for _ in range(self.num_classes)]\n",
    "        c_[c] = 1\n",
    "        content = content.split()\n",
    "        content_indices = [self.dp.X_w2id.get(char, self._x_unk) for char in content]\n",
    "        grad, bgrad = self.sess.run([self.Dgrad, self.Bowgradmu], {self.Y: [content_indices], \n",
    "                                                              self.C: [c_],\n",
    "                                                              self.z: z_mu,\n",
    "                                                              self.Y_seq_len: [len(content_indices)],\n",
    "                                                              self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        return grad, bgrad\n",
    "    \n",
    "    \n",
    "    def idxTozmu_batch(self, input_indices):\n",
    "        # Batch-encode already-indexed sequences to z_mu.\n",
    "        # NOTE(review): self.pad is not defined in the visible part of this\n",
    "        # class — confirm it exists further down (distinct from the module-\n",
    "        # level pad function).\n",
    "        input_indices_pad, length_list = self.pad(input_indices)\n",
    "        z = self.sess.run(self.z_mu, {self.X: input_indices_pad, self.X_seq_len: length_list, \n",
    "                                      self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        return z\n",
    "    \n",
    "    def zmuTox_batch(self, z_mu_batch):\n",
    "        # Decode a batch of latent codes; keeps only the top beam (index 0)\n",
    "        # of each batch element.  Returns (strings, id lists).\n",
    "        out_indices = self.sess.run(self.predicting_ids, {self.batch_size:z_mu_batch.shape[0],\n",
    "            self.z:z_mu_batch, self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        outputs = []\n",
    "        outputs_idx = []\n",
    "        for idx in range(out_indices.shape[0]):\n",
    "            eos_id = self.dp.Y_w2id['<EOS>']\n",
    "            ot = out_indices[idx,:,0]\n",
    "            ot = ot.tolist()\n",
    "            # Cut at the first <EOS>, if any.\n",
    "            if eos_id in ot:\n",
    "                ot = ot[:ot.index(eos_id)]\n",
    "            if self.reverse:\n",
    "                ot = ot[::-1]\n",
    "            output_str = ' '.join([self.dp.Y_id2w.get(i, u'&') for i in ot])\n",
    "            outputs_idx.append(ot)\n",
    "            outputs.append(output_str)\n",
    "        return outputs, outputs_idx\n",
    "    \n",
    "    def xTozmu_gradBowmu(self, input_word, content):\n",
    "        # Encode input_word and return (z_mu, BOW-loss gradient w.r.t. z_mu)\n",
    "        # with `content` as the BOW target sequence.\n",
    "        input_word = input_word.split()\n",
    "        content = content.split()\n",
    "        #print(input_word)\n",
    "        content_indices = [self.dp.X_w2id.get(char, self._x_unk) for char in content]\n",
    "        input_indices = [self.dp.X_w2id.get(char, self._x_unk) for char in input_word]\n",
    "        #print(input_indices)\n",
    "        grad, z = self.sess.run([self.Bowgradmu, self.z_mu], {self.Y: [content_indices], \n",
    "                                                              self.X: [input_indices], \n",
    "                                                              self.Y_seq_len: [len(content_indices)],\n",
    "                                                              self.X_seq_len: [len(input_indices)], \n",
    "                                                              self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        return z, grad\n",
    "    \n",
    "    def xTozmu_gradBowCmu(self, input_word, c, content):\n",
    "        # As above, but also returns the classifier-loss gradient (w.r.t. z)\n",
    "        # for class index c: (z_mu, Dgrad, Bowgradmu).\n",
    "        c_ = [0 for _ in range(self.num_classes)]\n",
    "        c_[c] = 1\n",
    "        input_word = input_word.split()\n",
    "        content = content.split()\n",
    "        #print(input_word)\n",
    "        content_indices = [self.dp.X_w2id.get(char, self._x_unk) for char in content]\n",
    "        input_indices = [self.dp.X_w2id.get(char, self._x_unk) for char in input_word]\n",
    "        #print(input_indices)\n",
    "        dgrad, grad, z = self.sess.run([self.Dgrad, self.Bowgradmu, self.z_mu], {self.Y: [content_indices], \n",
    "                                                              self.X: [input_indices], \n",
    "                                                              self.C: [c_],\n",
    "                                                              self.Y_seq_len: [len(content_indices)],\n",
    "                                                              self.X_seq_len: [len(input_indices)], \n",
    "                                                              self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        return z, dgrad, grad\n",
    "    \n",
    "    \n",
    "    def zTograd(self, z, c):\n",
    "        # Classifier-loss gradient w.r.t. a directly supplied latent code z.\n",
    "        c_ = [0 for _ in range(self.num_classes)]\n",
    "        c_[c] = 1\n",
    "        grad = self.sess.run(self.Dgrad, {self.C: [c_], self.z: z, self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        return grad\n",
    "    \n",
    "    def zTox(self, z):\n",
    "        # Decode a latent code to all beam hypotheses, plus the predicted\n",
    "        # class and softmax probabilities.  Iterates over the beam dimension\n",
    "        # but always indexes batch element 0, so this effectively assumes a\n",
    "        # single-element batch.\n",
    "        out_indices, c, auc = self.sess.run([self.predicting_ids, self.predictions, self.ypred_for_auc], {self.batch_size:z.shape[0],\n",
    "            self.z:z, self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        outputs = []\n",
    "        for idx in range(out_indices.shape[-1]):\n",
    "            eos_id = self.dp.Y_w2id['<EOS>']\n",
    "            ot = out_indices[0,:,idx]\n",
    "            ot = ot.tolist()\n",
    "            # Cut at the first <EOS>, if any.\n",
    "            if eos_id in ot:\n",
    "                ot = ot[:ot.index(eos_id)]\n",
    "            if self.reverse:\n",
    "                ot = ot[::-1]\n",
    "            output_str = ' '.join([self.dp.Y_id2w.get(i, u'&') for i in ot])\n",
    "            outputs.append(output_str)\n",
    "        return outputs, c[0], auc[0]\n",
    "    \n",
    "    \n",
    "    def generate(self, batch_size = 6):\n",
    "        \"\"\"Sample `batch_size` latent codes from N(0, I) and decode them to text.\n",
    "\n",
    "        Returns (decoded strings, class predictions), one string per sample\n",
    "        using the best (0th) beam only.\n",
    "        \"\"\"\n",
    "        out_indices, c_lists = self.sess.run([self.predicting_ids, self.predictions], { self.batch_size:batch_size,\n",
    "            self.z:np.random.randn(batch_size, self.latent_dim), self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        outputs = []\n",
    "        for idx in range(out_indices.shape[0]):\n",
    "            eos_id = self.dp.Y_w2id['<EOS>']\n",
    "            ot = out_indices[idx,:,0]   # The 0th beam of each batch \n",
    "            ot = ot.tolist()\n",
    "            if eos_id in ot:\n",
    "                ot = ot[:ot.index(eos_id)]  # truncate at first <EOS>\n",
    "            if self.reverse:\n",
    "                ot = ot[::-1]  # undo target-side reversal when self.reverse is set\n",
    "            output_str = ' '.join([self.dp.Y_id2w.get(i, u'&') for i in ot])\n",
    "            outputs.append(output_str)\n",
    "        return outputs, c_lists\n",
    "    \n",
    "    def infer(self, input_word):\n",
    "        \"\"\"Decode one whitespace-tokenized input string; returns one string per beam.\"\"\"\n",
    "        if self.reverse:\n",
    "            # NOTE(review): this reverses the raw string character-by-character,\n",
    "            # not the token list -- confirm that is intended for the tokenization used.\n",
    "            input_word = input_word[::-1]\n",
    "        input_indices = [self.dp.X_w2id.get(char, self._x_unk) for char in input_word.split()]\n",
    "        out_indices = self.sess.run(self.predicting_ids, {\n",
    "            self.X: [input_indices], self.X_seq_len: [len(input_indices)], self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        outputs = []\n",
    "        for idx in range(out_indices.shape[-1]):\n",
    "            eos_id = self.dp.Y_w2id['<EOS>']\n",
    "            # Beam idx of the single batch entry.\n",
    "            ot = out_indices[0,:,idx]\n",
    "            ot = ot.tolist()\n",
    "            if eos_id in ot:\n",
    "                ot = ot[:ot.index(eos_id)]  # truncate at first <EOS>\n",
    "            if self.reverse:\n",
    "                ot = ot[::-1]  # undo target-side reversal when self.reverse is set\n",
    "            output_str = ' '.join([self.dp.Y_id2w.get(i, u'&') for i in ot])\n",
    "            outputs.append(output_str)\n",
    "        return outputs\n",
    "    \n",
    "    def infer_with_c(self, input_word):\n",
    "        \"\"\"Like infer(), but also returns the predicted class of the input.\n",
    "\n",
    "        NOTE(review): unlike infer(), the input is NOT reversed here even when\n",
    "        self.reverse is set -- confirm the asymmetry is intended.\n",
    "        \"\"\"\n",
    "        input_indices = [self.dp.X_w2id.get(char, self._x_unk) for char in input_word.split()]\n",
    "        out_indices, C = self.sess.run([self.predicting_ids, self.predictions], {\n",
    "            self.X: [input_indices], self.X_seq_len: [len(input_indices)], self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        outputs = []\n",
    "        for idx in range(out_indices.shape[-1]):\n",
    "            eos_id = self.dp.Y_w2id['<EOS>']\n",
    "            ot = out_indices[0,:,idx]\n",
    "            ot = ot.tolist()\n",
    "            if eos_id in ot:\n",
    "                ot = ot[:ot.index(eos_id)]  # truncate at first <EOS>\n",
    "            if self.reverse:\n",
    "                ot = ot[::-1]  # undo target-side reversal when self.reverse is set\n",
    "            output_str = ' '.join([self.dp.Y_id2w.get(i, u'&') for i in ot])\n",
    "            outputs.append(output_str)\n",
    "        return outputs, C[0]\n",
    "    \n",
    "    def infer_with_c_batch(self, input_word_batch):\n",
    "        \"\"\"Batched decoding plus class predictions.\n",
    "\n",
    "        input_word_batch: list of whitespace-tokenized input strings.\n",
    "        Returns (decoded strings -- best beam per entry, class predictions).\n",
    "        NOTE(review): unlike infer(), inputs are not reversed here even when\n",
    "        self.reverse is set -- confirm the asymmetry is intended.\n",
    "        \"\"\"\n",
    "        input_indices_batch = np.array([[self.dp.X_w2id.get(char, self._x_unk) for char in input_word.split()] for input_word in input_word_batch])\n",
    "        # Bug fix: pad the encoder (X) inputs with the X-side pad id; the\n",
    "        # original used self.dp._y_pad, which only worked because X and Y\n",
    "        # happened to share one vocabulary.\n",
    "        input_indices_batch, input_indices_lengths = self.dp.pad_sentence_batch(input_indices_batch, self.dp._x_pad)\n",
    "        out_indices, C = self.sess.run([self.predicting_ids, self.predictions], {\n",
    "            self.X: input_indices_batch, self.X_seq_len: input_indices_lengths, self.output_keep_prob:1, self.input_keep_prob:1})\n",
    "        outputs = []\n",
    "        eos_id = self.dp.Y_w2id['<EOS>']  # hoisted: constant across the loop\n",
    "        for idx in range(out_indices.shape[0]):\n",
    "            ot = out_indices[idx,:,0].tolist()  # best (0th) beam of each entry\n",
    "            if eos_id in ot:\n",
    "                ot = ot[:ot.index(eos_id)]  # truncate at first <EOS>\n",
    "            if self.reverse:\n",
    "                ot = ot[::-1]  # undo target-side reversal when self.reverse is set\n",
    "            outputs.append(' '.join([self.dp.Y_id2w.get(i, u'&') for i in ot]))\n",
    "        return outputs, C\n",
    "    \n",
    "    def prefix_infer(self, input_word, prefix):\n",
    "        \"\"\"Decode a continuation of `prefix` conditioned on `input_word`.\n",
    "\n",
    "        The decoder is primed with the tokenized prefix and restarts from its\n",
    "        last token. Returns full output strings (prefix included), one per beam.\n",
    "        \"\"\"\n",
    "        input_indices_X = [self.dp.X_w2id.get(char, self._x_unk) for char in input_word.split()]\n",
    "        input_indices_Y = [self.dp.Y_w2id.get(char, self._y_unk) for char in prefix.split()]\n",
    "        prefix_go = []\n",
    "        prefix_go.append(input_indices_Y[-1])  # decoding continues from the last prefix token\n",
    "        out_indices, scores = self.sess.run([self.predicting_prefix_ids, self.prefix_score], {\n",
    "            self.X: [input_indices_X], self.X_seq_len: [len(input_indices_X)], self.Y:[input_indices_Y], self.Y_seq_len:[len(input_indices_Y)],\n",
    "            self.prefix_go: prefix_go, self.input_keep_prob:1, self.output_keep_prob:1})\n",
    "        \n",
    "        outputs = []\n",
    "        eos_id = self.dp.Y_w2id['<EOS>']\n",
    "        for idx in range(out_indices.shape[-1]):\n",
    "            ot = out_indices[0,:,idx]\n",
    "            ot = ot.tolist()\n",
    "            if eos_id in ot:\n",
    "                ot = ot[:ot.index(eos_id)]  # truncate at first <EOS>\n",
    "            # Bug fix: the reversal was nested inside the <EOS> branch above, so\n",
    "            # beams that never emitted <EOS> were left un-reversed; apply it\n",
    "            # unconditionally, as infer()/generate() do.\n",
    "            if self.reverse:\n",
    "                ot = ot[::-1]\n",
    "            if self.reverse:\n",
    "                output_str = ' '.join([self.dp.Y_id2w.get(i, u'&') for i in ot]) + ' ' + prefix\n",
    "            else:\n",
    "                output_str = prefix + ' ' + ' '.join([self.dp.Y_id2w.get(i, u'&') for i in ot])\n",
    "            outputs.append(output_str)\n",
    "        return outputs\n",
    "       \n",
    "    def pad(self, x, move_go=False):\n",
    "        \"\"\"Right-pad a batch of index sequences with <PAD> to the batch max length.\n",
    "\n",
    "        When move_go is True the leading token (<GO>) is dropped from every\n",
    "        sequence before padding. Returns (padded batch, true lengths).\n",
    "        \"\"\"\n",
    "        pad_id = self.dp.X_w2id['<PAD>']\n",
    "        trimmed = [seq[1:] for seq in x] if move_go else list(x)\n",
    "        length_list = [len(seq) for seq in trimmed]\n",
    "        max_length = max(length_list)\n",
    "        pad_x = [seq + [pad_id] * (max_length - len(seq)) for seq in trimmed]\n",
    "        return pad_x, length_list\n",
    "    \n",
    "    def restore(self, path):\n",
    "        \"\"\"Restore all saver-managed variables from the checkpoint at `path`.\"\"\"\n",
    "        self.saver.restore(self.sess, path)\n",
    "        msg = 'restore %s success' % path\n",
    "        print(msg)\n",
    "        \n",
    "    def get_learning_rate_decay(self, decay_scheme='luong234'):\n",
    "        \"\"\"Build a TF op implementing Luong-style staircase learning-rate decay.\n",
    "\n",
    "        'luong10': start halving at 1/2 of training, 10 decays of x0.5;\n",
    "        anything else ('luong234'): start at 2/3 of training, 4 decays of x0.5.\n",
    "        \"\"\"\n",
    "        num_train_steps = self.dp.num_steps\n",
    "        if decay_scheme == \"luong10\":\n",
    "            start_decay_step = int(num_train_steps / 2)\n",
    "            remain_steps = num_train_steps - start_decay_step\n",
    "            decay_steps = int(remain_steps / 10)  # decay 10 times\n",
    "            decay_factor = 0.5\n",
    "        else:\n",
    "            start_decay_step = int(num_train_steps * 2 / 3)\n",
    "            remain_steps = num_train_steps - start_decay_step\n",
    "            decay_steps = int(remain_steps / 4)  # decay 4 times\n",
    "            decay_factor = 0.5\n",
    "        # Hold the base rate until start_decay_step, then decay in staircase steps.\n",
    "        return tf.cond(\n",
    "            self.global_step < start_decay_step,\n",
    "            lambda: self.learning_rate,\n",
    "            lambda: tf.train.exponential_decay(\n",
    "                self.learning_rate,\n",
    "                (self.global_step - start_decay_step),\n",
    "                decay_steps, decay_factor, staircase=True),\n",
    "            name=\"learning_rate_decay_cond\")\n",
    "    \n",
    "    def setup_summary(self):\n",
    "        \"\"\"Create summary variables and ops for train loss, test loss and BLEU.\n",
    "\n",
    "        Returns (summary_placeholders, update_ops, summary_op): feed the\n",
    "        placeholders and run update_ops, then evaluate summary_op.\n",
    "        \"\"\"\n",
    "        train_loss = tf.Variable(0.)\n",
    "        tf.summary.scalar('Train_loss', train_loss)\n",
    "        \n",
    "        test_loss = tf.Variable(0.)\n",
    "        tf.summary.scalar('Test_loss', test_loss)\n",
    "        \n",
    "        bleu_score = tf.Variable(0.)\n",
    "        tf.summary.scalar('BLEU_score', bleu_score)\n",
    "\n",
    "        tf.summary.scalar('lr_rate', self.learning_rate)\n",
    "        \n",
    "        summary_vars = [train_loss, test_loss, bleu_score]\n",
    "        summary_placeholders = [tf.placeholder(tf.float32) for _ in range(len(summary_vars))]\n",
    "        update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]\n",
    "        summary_op = tf.summary.merge_all()\n",
    "        return summary_placeholders, update_ops, summary_op"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## DP"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-04-26T08:01:32.649263Z",
     "start_time": "2019-04-26T08:01:32.633427Z"
    }
   },
   "outputs": [],
   "source": [
    "\n",
    "class VAE_DP:\n",
    "    \"\"\"Data provider for the VAE: splits, shuffles, batches and pads data.\n",
    "\n",
    "    Holds parallel arrays of encoder inputs (X), decoder targets (Y) and\n",
    "    class labels (C), plus the word<->id vocabularies for both sides.\n",
    "    \"\"\"\n",
    "    def __init__(self, X_indices, Y_indices,  C_labels, X_w2id, Y_w2id, BATCH_SIZE, n_epoch, split_ratio=0.1, is_shuffle=False, test_data=None):\n",
    "        self.n_epoch = n_epoch\n",
    "        # NOTE(review): prefer `test_data is None` -- `== None` works here but\n",
    "        # is unidiomatic.\n",
    "        if test_data == None:\n",
    "            # No pre-made split: shuffle X/Y/C jointly, hold out split_ratio for test.\n",
    "            num_test = int(len(X_indices) * split_ratio)\n",
    "            r = np.random.permutation(len(X_indices))\n",
    "            X_indices = np.array(X_indices)[r].tolist()\n",
    "            Y_indices = np.array(Y_indices)[r].tolist()\n",
    "            C_labels = np.array(C_labels)[r].tolist()\n",
    "            self.C_train = np.array(C_labels[num_test:])\n",
    "            self.X_train = np.array(X_indices[num_test:])\n",
    "            self.Y_train = np.array(Y_indices[num_test:])\n",
    "            self.C_test = np.array(C_labels[:num_test])\n",
    "            self.X_test = np.array(X_indices[:num_test])\n",
    "            self.Y_test = np.array(Y_indices[:num_test])\n",
    "        else:\n",
    "            # Caller supplies (X_train, Y_train, C_train, X_test, Y_test, C_test).\n",
    "            self.X_train, self.Y_train, self.C_train, self.X_test, self.Y_test, self.C_test = test_data\n",
    "            self.X_train = np.array(self.X_train)\n",
    "            self.Y_train = np.array(self.Y_train)\n",
    "            self.C_train = np.array(self.C_train)\n",
    "            self.X_test = np.array(self.X_test)\n",
    "            self.Y_test = np.array(self.Y_test)\n",
    "            self.C_test = np.array(self.C_test)\n",
    "            \n",
    "        assert len(self.X_train) == len(self.Y_train)\n",
    "        self.num_batch = int(len(self.X_train) / BATCH_SIZE)\n",
    "        self.is_shuffle = is_shuffle  # if True, lightly permute tokens inside each X (see next_batch)\n",
    "        self.num_steps = self.num_batch * self.n_epoch\n",
    "        self.batch_size = BATCH_SIZE\n",
    "        self.X_w2id = X_w2id\n",
    "        self.X_id2w = dict(zip(X_w2id.values(), X_w2id.keys()))\n",
    "        self.Y_w2id = Y_w2id\n",
    "        self.Y_id2w = dict(zip(Y_w2id.values(), Y_w2id.keys()))\n",
    "        self._x_pad = self.X_w2id['<PAD>']\n",
    "        self._y_pad = self.Y_w2id['<PAD>']\n",
    "        print('Train_data: %d | Test_data: %d | Batch_size: %d | Num_batch: %d | X_vocab_size: %d | Y_vocab_size: %d' % (len(self.X_train), len(self.X_test), BATCH_SIZE, self.num_batch, len(self.X_w2id), len(self.Y_w2id)))\n",
    "        \n",
    "    def next_batch(self, X, Y, C):\n",
    "        \"\"\"Yield padded (X, Y, C, X_lens, Y_lens) batches in a fresh random order.\n",
    "\n",
    "        Incomplete trailing batches are dropped.\n",
    "        \"\"\"\n",
    "        r = np.random.permutation(len(X))\n",
    "        X = X[r]\n",
    "        Y = Y[r]\n",
    "        C = C[r]\n",
    "        \n",
    "        for i in range(0, len(X) - len(X) % self.batch_size, self.batch_size):\n",
    "            if self.is_shuffle:\n",
    "                # Noisy local reorder (augmentation): jitter each index by 0..3\n",
    "                # and argsort, so tokens move at most a few positions.\n",
    "                X_batch = []\n",
    "                for x in X[i : i + self.batch_size]:\n",
    "                    a = [t for t in range(len(x))]\n",
    "                    for j in range(len(a)):\n",
    "                        a[j] += np.random.randint(0, 4)\n",
    "                    p = np.argsort(a)\n",
    "                    #print(' '.join([id2w[idx] for idx in x]))\n",
    "                    x = np.array(copy.deepcopy(x))[p].tolist()\n",
    "                    #print(' '.join([id2w[idx] for idx in x]))\n",
    "                    X_batch.append(x)\n",
    "                X_batch = np.array(X_batch)\n",
    "            else:\n",
    "                X_batch = X[i : i + self.batch_size]\n",
    "\n",
    "            Y_batch = Y[i : i + self.batch_size]\n",
    "            C_batch = C[i : i + self.batch_size]\n",
    "            \n",
    "            padded_X_batch, X_batch_lens = self.pad_sentence_batch(X_batch, self._x_pad)\n",
    "            padded_Y_batch, Y_batch_lens = self.pad_sentence_batch(Y_batch, self._y_pad)\n",
    "            yield (np.array(padded_X_batch),\n",
    "                   np.array(padded_Y_batch),\n",
    "                   C_batch,\n",
    "                   X_batch_lens,\n",
    "                   Y_batch_lens)\n",
    "    \n",
    "    def sample_test_batch(self):\n",
    "        \"\"\"Return the first test batch, padded, for quick validation prints.\"\"\"\n",
    "        C = self.C_test[:self.batch_size]\n",
    "        padded_X_batch, X_batch_lens = self.pad_sentence_batch(self.X_test[: self.batch_size], self._x_pad)\n",
    "        padded_Y_batch, Y_batch_lens = self.pad_sentence_batch(self.Y_test[: self.batch_size], self._y_pad)\n",
    "        return np.array(padded_X_batch), np.array(padded_Y_batch), C, X_batch_lens, Y_batch_lens\n",
    "    \n",
    "        \n",
    "    def pad_sentence_batch(self, sentence_batch, pad_int):\n",
    "        \"\"\"Right-pad each sequence with pad_int to the batch max length.\n",
    "\n",
    "        Returns (padded lists, original lengths). Expects a numpy array\n",
    "        (it calls .tolist() on the batch).\n",
    "        \"\"\"\n",
    "        padded_seqs = []\n",
    "        seq_lens = []\n",
    "        sentence_batch = sentence_batch.tolist()\n",
    "        max_sentence_len = max([len(sentence) for sentence in sentence_batch])\n",
    "        for sentence in sentence_batch:\n",
    "            padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))\n",
    "            seq_lens.append(len(sentence))\n",
    "        return padded_seqs, seq_lens\n",
    "\n",
    "\n",
    "        "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Util"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-04-26T08:16:44.981366Z",
     "start_time": "2019-04-26T08:16:44.918392Z"
    }
   },
   "outputs": [],
   "source": [
    "import scipy.interpolate as si\n",
    "from scipy import interpolate\n",
    "\n",
    "def pad(x, pid, move_go=False):\n",
    "    \"\"\"Truncate sequences to 30 tokens, optionally drop the leading <GO> token,\n",
    "    then right-pad each with `pid` to the longest remaining length.\n",
    "\n",
    "    Returns (padded sequences, per-sequence true lengths).\n",
    "    \"\"\"\n",
    "    clipped = [seq[:30] for seq in x]\n",
    "    if move_go:\n",
    "        clipped = [seq[1:] for seq in clipped]\n",
    "    length_list = [len(seq) for seq in clipped]\n",
    "    max_length = max(length_list)\n",
    "    pad_x = [seq + [pid] * (max_length - len(seq)) for seq in clipped]\n",
    "    return pad_x, length_list\n",
    "\n",
    "def pad_maxlength(x, pid, move_go=False):\n",
    "    \"\"\"Right-pad every sequence with `pid` to a fixed length of 30.\n",
    "\n",
    "    If move_go is True, the leading <GO> token is dropped first. Sequences\n",
    "    longer than 30 tokens are truncated. Returns (padded sequences,\n",
    "    per-sequence true lengths capped at 30).\n",
    "    \"\"\"\n",
    "    max_length = 30\n",
    "    if move_go:\n",
    "        # Bug fix: the original move_go branch neither truncated the sequences\n",
    "        # nor capped the lengths, so inputs longer than 30 produced ragged\n",
    "        # batches and out-of-range lengths.\n",
    "        x = [k[1:] for k in x]\n",
    "    length_list = [min(len(k), max_length) for k in x]\n",
    "    pad_x = []\n",
    "    for k in x:\n",
    "        pad_k = k[:max_length] + [pid,] * (max_length - len(k))\n",
    "        pad_x.append(pad_k)\n",
    "    return pad_x, length_list\n",
    "\n",
    "def BetaGenerator(epoches, beta_decay_period, beta_decay_offset):\n",
    "    \"\"\"Build a KL-weight (beta) annealing schedule as a function of step.\n",
    "\n",
    "    Fits a cubic B-spline through control points so beta stays 0 for\n",
    "    `beta_decay_offset` steps, rises smoothly to 1 over `beta_decay_period`\n",
    "    steps, and stays 1 until `epoches`. Returns an interp1d mapping\n",
    "    step -> beta. (Arguments are in optimizer steps at the call site.)\n",
    "    \"\"\"\n",
    "    points = [[0,0], [0, beta_decay_offset],[0, beta_decay_offset + 0.33 * beta_decay_period], [1, beta_decay_offset + 0.66*beta_decay_period],[1, beta_decay_offset + beta_decay_period], [1, epoches] ];\n",
    "    points = np.array(points)\n",
    "    x = points[:,0]\n",
    "    y = points[:,1]\n",
    "    t = range(len(points))\n",
    "    ipl_t = np.linspace(0.0, len(points) - 1, 100)\n",
    "    x_tup = si.splrep(t, x, k=3)\n",
    "    y_tup = si.splrep(t, y, k=3)\n",
    "    # Replace the spline coefficients with the raw control values (zero-padded\n",
    "    # to the knot count) so the curve interpolates the control points.\n",
    "    x_list = list(x_tup)\n",
    "    xl = x.tolist()\n",
    "    x_list[1] = xl + [0.0, 0.0, 0.0, 0.0]\n",
    "    y_list = list(y_tup)\n",
    "    yl = y.tolist()\n",
    "    y_list[1] = yl + [0.0, 0.0, 0.0, 0.0]\n",
    "    x_i = si.splev(ipl_t, x_list)\n",
    "    y_i = si.splev(ipl_t, y_list)\n",
    "    # Invert the parametric curve: interpolate beta (x) as a function of step (y).\n",
    "    return interpolate.interp1d(y_i, x_i)\n",
    "\n",
    "\n",
    "class VAE_util:\n",
    "    \"\"\"Training/evaluation driver wiring a VAE model to a VAE_DP data provider.\"\"\"\n",
    "    def __init__(self, dp, model, display_freq=3, is_show=True):\n",
    "        self.display_freq = display_freq  # progress printouts per epoch\n",
    "        self.is_show = is_show\n",
    "        self.dp = dp\n",
    "        self.model = model\n",
    "        # Beta (KL weight) schedule; all arguments converted to optimizer steps.\n",
    "        self.betaG = BetaGenerator(self.dp.n_epoch*self.dp.num_batch, self.model.beta_decay_period*self.dp.num_batch, self.model.beta_decay_offset*self.dp.num_batch)\n",
    "        \n",
    "        \n",
    "    def train(self, epoch):\n",
    "        \"\"\"Run one training epoch; returns averaged (r_loss, c_loss, acc, kl_loss).\n",
    "\n",
    "        NOTE(review): the progress print reads self.n_epoch, which is only set\n",
    "        inside fit() -- calling train() before fit() raises AttributeError.\n",
    "        \"\"\"\n",
    "        avg_r_loss = 0.0\n",
    "        avg_c_loss = 0.0\n",
    "        avg_kl_loss = 0.0\n",
    "        avg_acc = 0.0\n",
    "        tic = time.time()\n",
    "        X_test_batch, Y_test_batch, C_test_batch, X_test_batch_lens, Y_test_batch_lens = self.dp.sample_test_batch()\n",
    "        for local_step, (X_train_batch, Y_train_batch, C_train_batch, X_train_batch_lens, Y_train_batch_lens) in enumerate(\n",
    "            self.dp.next_batch(self.dp.X_train, self.dp.Y_train, self.dp.C_train)):\n",
    "            #print(len(C_train_batch), len(X_train_batch))\n",
    "            # KL weight from the spline schedule; the 0.001 floor keeps it nonzero.\n",
    "            beta = 0.001 + self.betaG(self.model.step)\n",
    "            self.model.step, _, r_loss, c_loss, acc, kl_loss = self.model.sess.run([self.model.global_step, self.model.train_op, self.model.r_loss, self.model.c_loss, self.model.accuracy, self.model.kl_loss], \n",
    "                                              {self.model.X: X_train_batch,\n",
    "                                               self.model.Y: Y_train_batch,\n",
    "                                               self.model.C: C_train_batch,\n",
    "                                               self.model.X_seq_len: X_train_batch_lens,\n",
    "                                               self.model.Y_seq_len: Y_train_batch_lens,\n",
    "                                               self.model.output_keep_prob:self.model._output_keep_prob,\n",
    "                                               self.model.input_keep_prob:self.model._input_keep_prob,\n",
    "                                              self.model.B:beta})\n",
    "            avg_r_loss += r_loss\n",
    "            avg_c_loss += c_loss\n",
    "            avg_kl_loss += kl_loss\n",
    "            avg_acc += acc\n",
    "            \"\"\"\n",
    "            stats = [loss]\n",
    "            for i in xrange(len(stats)):\n",
    "                self.model.sess.run(self.model.update_ops[i], feed_dict={\n",
    "                    self.model.summary_placeholders[i]: float(stats[i])\n",
    "                })\n",
    "            summary_str = self.model.sess.run([self.model.summary_op])\n",
    "            self.summary_writer.add_summary(summary_str, self.model.step + 1)\n",
    "            \"\"\"\n",
    "            if self.is_show:\n",
    "                # Periodic validation on one fixed test batch (dropout disabled).\n",
    "                if (local_step % int(self.dp.num_batch / self.display_freq)) == 0:\n",
    "                    val_r_loss, val_c_loss, val_acc, val_kl_loss = self.model.sess.run([self.model.r_loss, self.model.c_loss, self.model.accuracy, self.model.kl_loss], {self.model.X: X_test_batch,\n",
    "                                                         self.model.Y: Y_test_batch,\n",
    "                                                         self.model.C: C_test_batch,\n",
    "                                                         self.model.X_seq_len: X_test_batch_lens,\n",
    "                                                         self.model.Y_seq_len: Y_test_batch_lens,\n",
    "                                                         self.model.output_keep_prob:1,\n",
    "                                                         self.model.input_keep_prob:1,\n",
    "                                                         self.model.B:beta})\n",
    "                    print(\"Epoch %d/%d | Batch %d/%d | Train_loss: R %.3f C %.3f acc %.3f kl %.3f | Test_loss: R %.3f C %.3f acc %.3f kl %.3f | Time_cost:%.3f\" % (epoch, self.n_epoch, local_step, self.dp.num_batch, \n",
    "                                                                                                                                                   avg_r_loss / (local_step + 1),\n",
    "                                                                                                                                                   avg_c_loss / (local_step + 1),\n",
    "                                                                                                                                                   avg_acc / (local_step + 1),\n",
    "                                                                                                                                                   avg_kl_loss / (local_step + 1),\n",
    "                                                                                                                                                   val_r_loss,\n",
    "                                                                                                                                                   val_c_loss,\n",
    "                                                                                                                                                   val_acc,\n",
    "                                                                                                                                                   val_kl_loss,\n",
    "                                                                                                                                                   time.time()-tic))\n",
    "                    self.cal()\n",
    "\n",
    "                    tic = time.time()\n",
    "        return avg_r_loss / (local_step + 1), avg_c_loss / (local_step + 1), avg_acc / (local_step + 1), avg_kl_loss / (local_step + 1)\n",
    "    \n",
    "    def test(self):\n",
    "        \"\"\"Average losses over the whole test split with dropout disabled.\n",
    "\n",
    "        Returns (r_loss, c_loss, accuracy, kl_loss) averaged over batches.\n",
    "        \"\"\"\n",
    "        avg_r_loss = 0.0\n",
    "        avg_c_loss = 0.0\n",
    "        avg_kl_loss = 0.0\n",
    "        avg_acc = 0.0\n",
    "        # Use the current beta so the KL term matches the training schedule.\n",
    "        beta = 0.001 + self.betaG(self.model.step)\n",
    "        for local_step, (X_test_batch, Y_test_batch, C_test_batch, X_test_batch_lens, Y_test_batch_lens) in enumerate(\n",
    "            self.dp.next_batch(self.dp.X_test, self.dp.Y_test, self.dp.C_test)):\n",
    "            r_loss, c_loss, acc, kl_loss = self.model.sess.run([self.model.r_loss, self.model.c_loss, self.model.accuracy, self.model.kl_loss], {self.model.X: X_test_batch,\n",
    "                                                 self.model.Y: Y_test_batch,\n",
    "                                                 self.model.C: C_test_batch,\n",
    "                                                 self.model.X_seq_len: X_test_batch_lens,\n",
    "                                                 self.model.Y_seq_len: Y_test_batch_lens,\n",
    "                                                 self.model.output_keep_prob:1,\n",
    "                                                 self.model.input_keep_prob:1,\n",
    "                                                 self.model.B:beta})\n",
    "            avg_r_loss += r_loss\n",
    "            avg_c_loss += c_loss\n",
    "            avg_kl_loss += kl_loss\n",
    "            avg_acc += acc\n",
    "        return avg_r_loss / (local_step + 1), avg_c_loss / (local_step + 1), avg_acc / (local_step + 1),  avg_kl_loss / (local_step + 1)\n",
    "    \n",
    "    def fit(self, train_dir, is_bleu):\n",
    "        \"\"\"Train for dp.n_epoch epochs, checkpointing to train_dir after each epoch.\n",
    "\n",
    "        NOTE(review): the is_bleu argument is accepted but never used here.\n",
    "        \"\"\"\n",
    "        self.n_epoch = self.dp.n_epoch\n",
    "        out_dir = train_dir\n",
    "        if not os.path.exists(out_dir):\n",
    "            os.makedirs(out_dir)\n",
    "        print(\"Writing to %s\" % out_dir)\n",
    "        checkpoint_prefix = os.path.join(out_dir, \"model\")\n",
    "        self.summary_writer = tf.summary.FileWriter(os.path.join(out_dir, 'Summary'), self.model.sess.graph)\n",
    "        for epoch in range(1, self.n_epoch+1):\n",
    "            tic = time.time()\n",
    "            train_r_loss, train_c_loss, train_acc, train_kl = self.train(epoch)\n",
    "            test_r_loss, test_c_loss, test_acc, test_kl = self.test()\n",
    "            \n",
    "            print(\"Epoch %d/%d | Train_loss: R %.3f C %.3f acc %.3f kl %.3f | Test_loss: R %.3f C %.3f acc %.3f kl %.3f \" % (epoch, self.n_epoch, train_r_loss, train_c_loss, train_acc, train_kl,\n",
    "                                                                                                              test_r_loss, test_c_loss, test_acc, test_kl))\n",
    "            path = self.model.saver.save(self.model.sess, checkpoint_prefix, global_step=epoch)\n",
    "            print(\"Saved model checkpoint to %s\" % path)\n",
    "    \n",
    "    def show(self, sent, id2w):\n",
    "        \"\"\"Map token ids back to a space-joined string; unknown ids render as '&'.\"\"\"\n",
    "        tokens = [id2w.get(idx, u'&') for idx in sent]\n",
    "        return \" \".join(tokens)\n",
    "    \n",
    "    def cal(self, n_example=5):\n",
    "        \"\"\"Decode a few test and train examples and print them next to ground truth.\n",
    "\n",
    "        Each line shows the input, the model output with its predicted class,\n",
    "        and the reference sentence with its true class label.\n",
    "        \"\"\"\n",
    "        train_n_example = int(n_example / 2)\n",
    "        test_n_example = n_example - train_n_example\n",
    "        for _ in range(test_n_example):\n",
    "            example = self.show(self.dp.X_test[_], self.dp.X_id2w)\n",
    "            y = self.show(self.dp.Y_test[_], self.dp.Y_id2w)\n",
    "            o, c = self.model.infer_with_c(example)\n",
    "            o = o[0]\n",
    "            print('Input: %s | Output: %s %d | GroundTruth: %s %d' % (example, o, c, y, np.argmax(self.dp.C_test[_])))\n",
    "        for _ in range(train_n_example):\n",
    "            example = self.show(self.dp.X_train[_], self.dp.X_id2w)\n",
    "            y = self.show(self.dp.Y_train[_], self.dp.Y_id2w)\n",
    "            # Bug fix: the original called infer() here but still printed `c`,\n",
    "            # a stale value left over from the last test-loop iteration.\n",
    "            o, c = self.model.infer_with_c(example)\n",
    "            o = o[0]\n",
    "            print('Input: %s | Output: %s %d | GroundTruth: %s %d' % (example, o, c, y, np.argmax(self.dp.C_train[_])))\n",
    "        print(\"\")\n",
    "        \n",
    "    def test_bleu(self, N=300, gram=4):\n",
    "        \"\"\"Mean BLEU (up to `gram`-grams) over the first N test sentences.\n",
    "\n",
    "        NOTE(review): BLEU() is not defined in the visible imports (only\n",
    "        Util.mybleu is imported) -- confirm where the name comes from.\n",
    "        NOTE(review): ' '.join(w for w in o) iterates the CHARACTERS of the\n",
    "        decoded string, inserting a space between every character -- confirm\n",
    "        this is intended for the tokenization used.\n",
    "        \"\"\"\n",
    "        all_score = []\n",
    "        for i in range(N):\n",
    "            input_indices = self.show(self.dp.X_test[i], self.dp.X_id2w)\n",
    "            o = self.model.infer(input_indices)[0]\n",
    "            refer4bleu = [[' '.join([self.dp.Y_id2w.get(w, u'&') for w in self.dp.Y_test[i]])]]\n",
    "            candi = [' '.join(w for w in o)]\n",
    "            score = BLEU(candi, refer4bleu, gram=gram)\n",
    "            all_score.append(score)\n",
    "        return np.mean(all_score)\n",
    "    \n",
    "    def show_res(self, path):\n",
    "        \"\"\"Load pickled training curves from `path` and plot train/test loss and BLEU.\"\"\"\n",
    "        # Bug fix: pickle files must be opened in binary mode under Python 3;\n",
    "        # the original text-mode open() fails at load time. Context manager\n",
    "        # also guarantees the file handle is closed.\n",
    "        with open(path, 'rb') as f:\n",
    "            res = cPickle.load(f)\n",
    "        plt.figure(1)\n",
    "        plt.title('The results') \n",
    "        l1, = plt.plot(res[0], 'g')\n",
    "        l2, = plt.plot(res[1], 'r')\n",
    "        l3, = plt.plot(res[3], 'b')\n",
    "        plt.legend(handles = [l1, l2, l3], labels = [\"Train_loss\",\"Test_loss\",\"BLEU\"], loc = 'best')\n",
    "        plt.show()\n",
    "        \n",
    "    def test_all(self, path, epoch_range, is_bleu=True):\n",
    "        \"\"\"Restore checkpoints path+str(i) over epoch_range and plot metrics.\n",
    "\n",
    "        NOTE(review): self.test() returns a 4-tuple, so val_loss_list holds\n",
    "        tuples; bleu_list stays empty (but is still plotted) when is_bleu is\n",
    "        False.\n",
    "        \"\"\"\n",
    "        val_loss_list = []\n",
    "        bleu_list = []\n",
    "        for i in range(epoch_range[0], epoch_range[-1]):\n",
    "            self.model.restore(path + str(i))\n",
    "            val_loss = self.test()\n",
    "            val_loss_list.append(val_loss)\n",
    "            if is_bleu:\n",
    "                bleu_score = self.test_bleu()\n",
    "                bleu_list.append(bleu_score)\n",
    "        plt.figure(1)\n",
    "        plt.title('The results') \n",
    "        l1, = plt.plot(val_loss_list,'r')\n",
    "        l2, = plt.plot(bleu_list,'b')\n",
    "        plt.legend(handles = [l1, l2], labels = [\"Test_loss\",\"BLEU\"], loc = 'best')\n",
    "        plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:35:52.272827Z",
     "start_time": "2019-05-15T12:35:52.256729Z"
    }
   },
   "outputs": [],
   "source": [
    "import pickle\n",
    "# Vocabulary lookup tables: word -> id and id -> word.\n",
    "# NOTE(review): hardcoded absolute path -- consider a configurable data dir.\n",
    "w2id, id2w = pickle.load(open('/workspace/Data/amazon/w2id_id2w.pkl','rb'))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-13T11:03:27.216655Z",
     "start_time": "2019-05-13T11:03:23.616513Z"
    }
   },
   "outputs": [],
   "source": [
    "# Load the (token-id sentence, class label) splits.\n",
    "Y_train, C_train = pickle.load(open('/workspace/Data/amazon/XC_train.pkl','rb'))\n",
    "Y_dev, C_dev = pickle.load(open('/workspace/Data/amazon/XC_dev.pkl','rb'))\n",
    "Y_test, C_test = pickle.load(open('/workspace/Data/amazon/XC_test.pkl','rb'))\n",
    "print(C_train[0])\n",
    "\n",
    "# Encoder inputs drop the final target token (presumably <EOS> -- TODO confirm).\n",
    "X_train = [x[:-1] for x in Y_train]\n",
    "X_dev = [x[:-1] for x in Y_dev]\n",
    "X_test = [x[:-1] for x in Y_test]\n",
    "print(idx2str(Y_test[0]), idx2str(X_test[0]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Experiments"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Init Train model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-04-26T08:01:38.204448Z",
     "start_time": "2019-04-26T08:01:37.447153Z"
    }
   },
   "outputs": [],
   "source": [
    "# Experiment configuration.\n",
    "BATCH_SIZE = 200\n",
    "NUM_EPOCH = 35\n",
    "is_shuffle = False\n",
    "Latent_weight = 0.4  # passed to the VAE constructor as latent_weight\n",
    "Model_basic_name = 'VAEGS-BOW-04'\n",
    "train_dir ='Model/Amazon/' + Model_basic_name\n",
    "# X and Y share one vocabulary; the pre-made splits go in via test_data.\n",
    "vae_dp = VAE_DP(None, None, None, w2id, w2id, BATCH_SIZE, test_data=(X_train, Y_train, C_train, X_dev, Y_dev, C_dev), n_epoch=NUM_EPOCH, is_shuffle=is_shuffle)\n",
    "\n",
    "\n",
    "\n",
    "is_training = False"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-03-25T09:17:30.105910Z",
     "start_time": "2019-03-25T09:17:30.102059Z"
    }
   },
   "source": [
    "## Training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-04-22T04:17:15.517397Z",
     "start_time": "2019-04-22T04:16:32.988467Z"
    }
   },
   "outputs": [],
   "source": [
    "# Build the training graph and either restore a checkpoint or fit from scratch.\n",
    "# NOTE(review): sess_conf and the VAE class are defined in cells not shown here.\n",
    "#is_training = True\n",
    "if is_training:\n",
    "    g = tf.Graph() \n",
    "    sess = tf.Session(graph=g, config=sess_conf) \n",
    "    with sess.as_default():\n",
    "        with sess.graph.as_default():\n",
    "            model = VAE(\n",
    "                dp = vae_dp,\n",
    "                rnn_size = 512,\n",
    "                n_layers = 1,\n",
    "                encoder_embedding_dim = 128,\n",
    "                decoder_embedding_dim = 128,\n",
    "                cell_type = 'lstm',\n",
    "                latent_dim = 512,\n",
    "                beta_decay_period = 10, \n",
    "                beta_decay_offset = 5,\n",
    "                latent_weight = Latent_weight,\n",
    "                bow_size = 400,\n",
    "                is_inference = False,\n",
    "                num_classes = 2,\n",
    "                max_infer_length = 20,\n",
    "                #att_type='B',\n",
    "                beam_width=10,\n",
    "                Lambda = 0.9,\n",
    "                gamma = 10.0,\n",
    "                residual = False,\n",
    "                sess=sess\n",
    "            )\n",
    "            #print(len(tf.global_variables()))\n",
    "\n",
    "    util = VAE_util(dp=vae_dp, model=model)\n",
    "    model.restore('Model/Amazon/VAEGS-BOW-04/model-35')\n",
    "    #util.fit(train_dir=train_dir, is_bleu=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Init Test model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-04-26T08:01:54.124433Z",
     "start_time": "2019-04-26T08:01:43.733656Z"
    }
   },
   "outputs": [],
   "source": [
    "# Inference-graph copy of the model (is_inference=True); hyperparameters must\n",
    "# match the training cell so the checkpoint restores cleanly.\n",
    "g = tf.Graph() \n",
    "sess = tf.Session(graph=g, config=sess_conf) \n",
    "with sess.as_default():\n",
    "    with sess.graph.as_default():\n",
    "        model = VAE(\n",
    "            dp = vae_dp,\n",
    "            rnn_size = 512,\n",
    "            n_layers = 1,\n",
    "            encoder_embedding_dim = 128,\n",
    "            decoder_embedding_dim = 128,\n",
    "            cell_type = 'lstm',\n",
    "            latent_dim = 512,\n",
    "            beta_decay_period = 10, \n",
    "            beta_decay_offset = 5,\n",
    "            latent_weight = Latent_weight,\n",
    "            bow_size = 400,\n",
    "            is_inference = True,\n",
    "            num_classes = 2,\n",
    "            max_infer_length = 20,\n",
    "            #att_type='B',\n",
    "            beam_width=10,\n",
    "            Lambda = 0.9,\n",
    "            gamma = 10.0,\n",
    "            residual = False,\n",
    "            sess=sess\n",
    "        )\n",
    "        #print(len(tf.global_variables()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Init TextCNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-04-25T06:22:51.881414Z",
     "start_time": "2019-04-25T06:22:50.017265Z"
    }
   },
   "outputs": [],
   "source": [
    "# Build the TextCNN classifier D in its own graph/session; it is used below\n",
    "# (via D.batch_infer) to label sentences decoded from the latent space.\n",
    "from textCNN import *\n",
    "BATCH_SIZE = 256\n",
    "NUM_EPOCH = 30\n",
    "MAX_LENGTH = 16\n",
    "\n",
    "cnn_dp = TextCNN_DP(X_train, C_train, w2id,  BATCH_SIZE, max_length = MAX_LENGTH, n_epoch=NUM_EPOCH, split_ratio=0.05)\n",
    "\n",
    "emb_dim = 128\n",
    "filter_sizes = [1, 2, 3, 4, 5]\n",
    "num_filters = [128, 128, 128, 128, 128]\n",
    "\n",
    "g_cnn = tf.Graph() \n",
    "sess_cnn = tf.Session(graph=g_cnn, config=sess_conf) \n",
    "with sess_cnn.as_default():\n",
    "    with sess_cnn.graph.as_default():\n",
    "        D = TextCNN(sess = sess_cnn, dp = cnn_dp, sequence_length=MAX_LENGTH, num_classes=2, vocab_size=len(cnn_dp.id2w),\n",
    "                          emd_dim = emb_dim, filter_sizes = filter_sizes, num_filters=num_filters,\n",
    "                          l2_reg_lambda=0.2, dropout_keep_prob=0.75)\n",
    "        D.sess.run(tf.global_variables_initializer())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-04-22T02:41:41.755923Z",
     "start_time": "2019-04-22T02:41:41.722953Z"
    }
   },
   "outputs": [],
   "source": [
    "# Restore pre-trained TextCNN weights (epoch-30 checkpoint) instead of training here.\n",
    "D.restore('Model/Amazon/TextCNN/model-30')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## ReTrain Attribute Predictor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-04-22T02:41:42.093525Z",
     "start_time": "2019-04-22T02:41:41.758478Z"
    }
   },
   "outputs": [],
   "source": [
    "# Stage-2 training: retrain the latent attribute predictor (model.predictor_op).\n",
    "# Labels C come from decoding z back to text and classifying the result with\n",
    "# the TextCNN oracle D; each batch of encoded training sentences is followed\n",
    "# by a batch of z vectors sampled from the standard normal prior.\n",
    "is_training_2 = False\n",
    "\n",
    "if is_training_2:\n",
    "    #model.restore(train_dir + '/model-%d' % NUM_EPOCH)\n",
    "    model.restore('Model/Amazon/VAEGS-BOW-04/model-35')\n",
    "    n_epoch = 10\n",
    "    batch_size = 200\n",
    "    #out_dir = train_dir + '2'\n",
    "    out_dir = 'Model/Amazon/VAEGS-BOW-04-2stage'\n",
    "    random.shuffle(X_train)\n",
    "    if not os.path.exists(out_dir):\n",
    "        os.makedirs(out_dir)\n",
    "    print(\"Writing to %s\" % out_dir)\n",
    "    checkpoint_prefix = os.path.join(out_dir, \"model\")\n",
    "\n",
    "    X_train = np.array(X_train)\n",
    "    for e in range(n_epoch):\n",
    "        acc_list = []\n",
    "        loss_list = []\n",
    "        r = np.random.permutation(len(X_train))\n",
    "        X_train = X_train[r]\n",
    "\n",
    "        # Baseline test accuracy before any stage-2 update\n",
    "        if e == 0:\n",
    "            acc_test_list = []\n",
    "            for b in range(0, len(X_test) - len(X_test) % batch_size, batch_size):\n",
    "                X_batch = X_test[b : b + batch_size]\n",
    "                z_mu = model.idxTozmu_batch(X_batch)\n",
    "                o_str, o_idx = model.zmuTox_batch(z_mu)\n",
    "                output = D.batch_infer(o_idx)\n",
    "                C = np.zeros([len(output), 2])\n",
    "                for i, o in enumerate(output):\n",
    "                    C[i][o] = 1\n",
    "               \n",
    "                loss, acc = model.sess.run([model.c_loss, model.accuracy], \n",
    "                           {model.z:z_mu, model.C:C, \n",
    "                            model.output_keep_prob:1.0,\n",
    "                            model.input_keep_prob:1.0})\n",
    "                acc_test_list.append(acc)\n",
    "            print('%d/%d: Test: %.4f' % (e, n_epoch, np.mean(acc_test_list)))\n",
    "        ## Train\n",
    "        for b in range(0, len(X_train) - len(X_train) % batch_size, batch_size):\n",
    "            X_batch = X_train[b : b + batch_size]\n",
    "            z_mu = model.idxTozmu_batch(X_batch)\n",
    "            o_str, o_idx = model.zmuTox_batch(z_mu)\n",
    "            \n",
    "            output = D.batch_infer(o_idx)\n",
    "            C = np.zeros([len(output), 2])\n",
    "            for i, o in enumerate(output):\n",
    "                C[i][o] = 1\n",
    "            \n",
    "            _, loss, acc = model.sess.run([model.predictor_op, model.c_loss, model.accuracy], \n",
    "                           {model.z:z_mu, model.C:C, \n",
    "                            model.output_keep_prob:model._output_keep_prob,\n",
    "                            model.input_keep_prob:model._input_keep_prob})\n",
    "            loss_list.append(loss)\n",
    "            acc_list.append(acc)\n",
    "\n",
    "            # Second update on z drawn from the prior N(0, I) (latent_dim = 512)\n",
    "            z_mu = np.random.randn(batch_size, 512)\n",
    "            o_str, o_idx = model.zmuTox_batch(z_mu)\n",
    "            \n",
    "            output = D.batch_infer(o_idx)\n",
    "            C = np.zeros([len(output), 2])\n",
    "            for i, o in enumerate(output):\n",
    "                C[i][o] = 1\n",
    "            \n",
    "            \n",
    "            _, loss, acc = model.sess.run([model.predictor_op, model.c_loss, model.accuracy], \n",
    "                           {model.z:z_mu, model.C:C, \n",
    "                            model.output_keep_prob:model._output_keep_prob,\n",
    "                            model.input_keep_prob:model._input_keep_prob})\n",
    "            loss_list.append(loss)\n",
    "            acc_list.append(acc)\n",
    "            if (b % (50 * batch_size)) == 0:\n",
    "                print(e, b / batch_size, np.mean(loss_list), np.mean(acc_list))\n",
    "\n",
    "        ## Test   \n",
    "        acc_test_list = []\n",
    "        for b in range(0, len(X_test) - len(X_test) % batch_size, batch_size):\n",
    "            X_batch = X_test[b : b + batch_size]\n",
    "            z_mu = model.idxTozmu_batch(X_batch)\n",
    "            o_str, o_idx = model.zmuTox_batch(z_mu)\n",
    "            output = D.batch_infer(o_idx)\n",
    "            C = np.zeros([len(output), 2])\n",
    "            for i, o in enumerate(output):\n",
    "                C[i][o] = 1\n",
    "           \n",
    "            loss, acc = model.sess.run([model.c_loss, model.accuracy], \n",
    "                       {model.z:z_mu, model.C:C, \n",
    "                        model.output_keep_prob:1.0,\n",
    "                        model.input_keep_prob:1.0})\n",
    "            acc_test_list.append(acc)\n",
    "        print('%d/%d: Test: %.4f | Train: %.4f' % (e+1, n_epoch, np.mean(acc_test_list), np.mean(acc_list)))\n",
    "        path = model.saver.save(model.sess, checkpoint_prefix, global_step=e+1)\n",
    "        print(\"Saved model checkpoint to %s\" % path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## ReTrain Content Predictor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-04-22T02:41:42.308991Z",
     "start_time": "2019-04-22T02:41:42.095506Z"
    }
   },
   "outputs": [],
   "source": [
    "def cal_F1(predict, idx, lengths):\n",
    "    \"\"\"Score the top-`lengths` entries of `predict` against the target indices `idx`.\n",
    "\n",
    "    predict: 1-D array of per-vocabulary-index scores (BoW logits).\n",
    "    idx:     iterable of ground-truth token indices.\n",
    "    lengths: number of top-scoring predictions to keep.\n",
    "    Returns (precision, recall, F1).\n",
    "    \"\"\"\n",
    "    cnt = 0.001  # small offset keeps F1 well-defined when nothing matches\n",
    "    pred_idx = np.argsort(predict)[::-1][:lengths]\n",
    "    for t in pred_idx:\n",
    "        if t in idx:\n",
    "            cnt += 1\n",
    "    # BUG FIX: precision divides by the number of predictions made; the original\n",
    "    # divided both P and R by len(set(idx)), which made P identical to R (and F1\n",
    "    # degenerate to P).\n",
    "    P = cnt / max(len(pred_idx), 1)\n",
    "    R = cnt / len(set(idx))\n",
    "    F1 = (2 * P * R) / (P + R)\n",
    "    return P, R, F1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-04-22T02:41:42.709760Z",
     "start_time": "2019-04-22T02:41:42.312146Z"
    }
   },
   "outputs": [],
   "source": [
    "# Stage-3 training: retrain the bag-of-words content predictor (model.bow_op).\n",
    "# Targets are the sentences decoded from z; cal_F1 scores how well the BoW\n",
    "# logits recover the decoded tokens. As in stage 2, each batch of encoded\n",
    "# sentences is followed by a batch of z samples from the standard normal prior.\n",
    "is_training_3 = False\n",
    "\n",
    "if is_training_3:\n",
    "    #model.restore(train_dir + '/model-%d' % NUM_EPOCH)\n",
    "    model.restore('Model/Amazon/VAEGS-BOW-04-2stage/model-10')\n",
    "    n_epoch = 10\n",
    "    batch_size = 200\n",
    "    #out_dir = train_dir + '2'\n",
    "    out_dir = 'Model/Amazon/VAEGS-BOW-04-3stage'\n",
    "    random.shuffle(X_train)\n",
    "    if not os.path.exists(out_dir):\n",
    "        os.makedirs(out_dir)\n",
    "    print(\"Writing to %s\" % out_dir)\n",
    "    checkpoint_prefix = os.path.join(out_dir, \"model\")\n",
    "\n",
    "    X_train = np.array(X_train)\n",
    "    for e in range(n_epoch):\n",
    "        p_list = []\n",
    "        r_list = []\n",
    "        f1_list = []\n",
    "        loss_list = []\n",
    "        r = np.random.permutation(len(X_train))\n",
    "        X_train = X_train[r]\n",
    "\n",
    "        # Baseline test P/R/F1 before any stage-3 update\n",
    "        if e == 0:\n",
    "            p_test_list = []\n",
    "            r_test_list = []\n",
    "            f1_test_list = []\n",
    "            for b in range(0, len(X_test) - len(X_test) % batch_size, batch_size):\n",
    "                X_batch = X_test[b : b + batch_size]\n",
    "                z_mu = model.idxTozmu_batch(X_batch)\n",
    "                o_str, o_idx = model.zmuTox_batch(z_mu)\n",
    "                pad_x, length_list = pad(o_idx, w2id['<PAD>'], move_go=False)\n",
    "                \n",
    "                loss, logits = model.sess.run([model.avg_bow_loss, model.bow_logits], \n",
    "                           {model.z:z_mu, model.Y:pad_x,\n",
    "                            model.Y_seq_len:length_list,\n",
    "                            model.output_keep_prob:1.0,\n",
    "                            model.input_keep_prob:1.0})\n",
    "                for j,x in enumerate(o_idx):\n",
    "                    p, r, f1 = cal_F1(logits[j], x, len(x))\n",
    "                    p_test_list.append(p)\n",
    "                    r_test_list.append(r)\n",
    "                    f1_test_list.append(f1)\n",
    "            print('%d/%d: Test: P %.2f | R %.2f | F1 %.2f' % (e, n_epoch, np.mean(p_test_list)*100, np.mean(r_test_list) *100, np.mean(f1_test_list)*100))\n",
    "        \n",
    "        ## Train\n",
    "        for b in range(0, len(X_train) - len(X_train) % batch_size, batch_size):\n",
    "            X_batch = X_train[b : b + batch_size]\n",
    "            z_mu = model.idxTozmu_batch(X_batch)\n",
    "            o_str, o_idx = model.zmuTox_batch(z_mu)\n",
    "            pad_x, length_list = pad(o_idx, w2id['<PAD>'], move_go=False)\n",
    "            _, loss, logits = model.sess.run([model.bow_op, model.avg_bow_loss, model.bow_logits], \n",
    "                       {model.z:z_mu, model.Y:pad_x,\n",
    "                        model.Y_seq_len:length_list,\n",
    "                        model.output_keep_prob:model._output_keep_prob,\n",
    "                        model.input_keep_prob:model._input_keep_prob})\n",
    "            \n",
    "            for j,x in enumerate(o_idx):\n",
    "                p, r, f1 = cal_F1(logits[j], x, len(x))\n",
    "                p_list.append(p)\n",
    "                r_list.append(r)\n",
    "                f1_list.append(f1)\n",
    "            loss_list.append(loss)\n",
    "\n",
    "            # Second update on z drawn from the prior N(0, I) (latent_dim = 512)\n",
    "            z_mu = np.random.randn(batch_size, 512)\n",
    "            o_str, o_idx = model.zmuTox_batch(z_mu)\n",
    "            pad_x, length_list = pad(o_idx, w2id['<PAD>'], move_go=False)\n",
    "            _, loss, logits = model.sess.run([model.bow_op, model.avg_bow_loss, model.bow_logits], \n",
    "                       {model.z:z_mu, model.Y:pad_x,\n",
    "                        model.Y_seq_len:length_list,\n",
    "                        model.output_keep_prob:model._output_keep_prob,\n",
    "                        model.input_keep_prob:model._input_keep_prob})\n",
    "            \n",
    "            for j,x in enumerate(o_idx):\n",
    "                p, r, f1 = cal_F1(logits[j], x, len(x))\n",
    "                p_list.append(p)\n",
    "                r_list.append(r)\n",
    "                f1_list.append(f1)\n",
    "            loss_list.append(loss)\n",
    "            if (b % (50 * batch_size)) == 0:\n",
    "                print(\"epoch %d/%d batch %d/%d: loss %.5f | P %.2f | R %.2f | F1 %.2f\" % (e, n_epoch, int(b / batch_size), \n",
    "                                                                                          int((len(X_train) - len(X_train) % batch_size) / batch_size),\n",
    "                                                                                          np.mean(loss_list), np.mean(p_list)*100, np.mean(r_list)*100, np.mean(f1_list)*100))\n",
    "\n",
    "        ## Test   \n",
    "        p_test_list = []\n",
    "        r_test_list = []\n",
    "        f1_test_list = []\n",
    "        for b in range(0, len(X_test) - len(X_test) % batch_size, batch_size):\n",
    "            X_batch = X_test[b : b + batch_size]\n",
    "            z_mu = model.idxTozmu_batch(X_batch)\n",
    "            o_str, o_idx = model.zmuTox_batch(z_mu)\n",
    "            pad_x, length_list = pad(o_idx, w2id['<PAD>'], move_go=False)\n",
    "\n",
    "            loss, logits = model.sess.run([model.avg_bow_loss, model.bow_logits], \n",
    "                       {model.z:z_mu, model.Y:pad_x,\n",
    "                        model.Y_seq_len:length_list,\n",
    "                        model.output_keep_prob:1.0,\n",
    "                        model.input_keep_prob:1.0})\n",
    "            for j,x in enumerate(o_idx):\n",
    "                p, r, f1 = cal_F1(logits[j], x, len(x))\n",
    "                p_test_list.append(p)\n",
    "                r_test_list.append(r)\n",
    "                f1_test_list.append(f1)\n",
    "        print('%d/%d: Test: P %.2f | R %.2f | F1 %.2f' % (e+1, n_epoch, np.mean(p_test_list) *100, np.mean(r_test_list) *100, np.mean(f1_test_list)*100))\n",
    "        print('%d/%d: Train: P %.2f | R %.2f | F1 %.2f' % (e+1, n_epoch, np.mean(p_list)*100, np.mean(r_list)*100, np.mean(f1_list)*100))\n",
    "        path = model.saver.save(model.sess, checkpoint_prefix, global_step=e+1)\n",
    "        print(\"Saved model checkpoint to %s\" % path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Style Transfer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:35:56.583985Z",
     "start_time": "2019-05-15T12:35:56.558553Z"
    }
   },
   "outputs": [],
   "source": [
    "# Load the vocabulary and the 1000-sentence evaluation set (originals, human\n",
    "# references, extracted nouns). C_original holds one-hot labels: the first 500\n",
    "# sentences share one class, the last 500 the other (presumably the two\n",
    "# sentiment polarities -- TODO confirm against the data pickles).\n",
    "w2id, id2w = pickle.load(open('/workspace/Data/amazon/w2id_id2w.pkl','rb'))\n",
    "original, reference, original_noun, reference_noun = pickle.load(open('/workspace/Data/amazon/original_reference_and_noun.pkl','rb'))\n",
    "C_original = [[1, 0] for i in range(500)] + [[0, 1] for i in range(500)]\n",
    "assert len(original) == 1000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-07T07:31:45.772331Z",
     "start_time": "2019-05-07T07:31:45.741271Z"
    }
   },
   "outputs": [],
   "source": [
    "\n",
    "# Latent-space style transfer. All four variants walk z against the attribute\n",
    "# predictor's gradient (optionally plus the BoW content gradient), decode after\n",
    "# each step, and record candidates whose classifier score auc[t_c] exceeds\n",
    "# save_threshold, stopping once it exceeds AUC_threshold or after 100 steps.\n",
    "# Each recorded tuple is (text, BLEU vs reference, BLEU vs source, score,\n",
    "# noun overlap count, step count).\n",
    "def style_transfer(o_str, o_c, ref, nouns, Gamma=0.1, AUC_threshold=0.98, save_threshold=0.7):\n",
    "    \"\"\"One-shot gradient: reuse the initial attribute gradient with a growing step size p.\"\"\"\n",
    "    tic = time.time()\n",
    "    nouns = set([t[0] for t in nouns])\n",
    "    t_c = int(1 - np.argmax(o_c))\n",
    "    new_str = ''\n",
    "    outputs_list = []\n",
    "    z, grad = model.xTozmu_gradmu(o_str, t_c)\n",
    "    p = 1\n",
    "    cnt = 0\n",
    "    while (True):\n",
    "        z1 = z - p * Gamma * grad[0]\n",
    "        nxs, cs, auc = model.zTox(z1)\n",
    "        cnt += 1\n",
    "        p += 0.5\n",
    "        #print(nxs[0], '%.2f' % auc[t_c], np.linalg.norm(grad))\n",
    "        if nxs[0] != new_str and auc[t_c] > save_threshold:\n",
    "            new_str = nxs[0]\n",
    "            bleu = mybleu.BLEU([new_str], [[ref]])[1]\n",
    "            sleu = mybleu.BLEU([new_str], [[o_str]])[1]\n",
    "            noun_cnt = 0\n",
    "            #print(auc[t_c])\n",
    "            for w in new_str.split():\n",
    "                if w in nouns:\n",
    "                    noun_cnt += 1\n",
    "            outputs_list.append((new_str, bleu, sleu, auc[t_c], noun_cnt, cnt))\n",
    "        if (auc[t_c] > AUC_threshold) or (cnt > 100):\n",
    "            break\n",
    "        \n",
    "    #print('%.3f' % (time.time()-tic))\n",
    "    return outputs_list\n",
    "\n",
    "\n",
    "\n",
    "def style_transfer_content(o_str, o_c, ref, nouns, Gamma=0.1, GammaB=0.01, AUC_threshold=0.98, save_threshold=0.7, content_type='1'):\n",
    "    \"\"\"Like style_transfer, but also follows the bag-of-words content gradient.\n",
    "\n",
    "    content_type: '1' = source sentence, '2' = its nouns, anything else = both.\n",
    "    \"\"\"\n",
    "    tic = time.time()\n",
    "    nouns = set([t[0] for t in nouns])\n",
    "    if content_type == '1':\n",
    "        content = o_str\n",
    "    elif content_type == '2':\n",
    "        content = ' '.join(list(nouns))\n",
    "    else:\n",
    "        content = o_str + ' ' + ' '.join(list(nouns))\n",
    "    t_c = int(1 - np.argmax(o_c))\n",
    "    new_str = ''\n",
    "    outputs_list = []\n",
    "    z, grad, bgrad = model.xTozmu_gradBowCmu(o_str, t_c, content)\n",
    "    p = 1\n",
    "    cnt = 0\n",
    "    #print('[content]', content)\n",
    "    while (True):\n",
    "        z1 = z - p * (Gamma * grad[0] + GammaB * bgrad[0])\n",
    "        nxs, cs, auc = model.zTox(z1)\n",
    "        cnt += 1\n",
    "        p += 0.5\n",
    "        #print(nxs[0], '%.2f' % auc[t_c], p * Gamma * np.linalg.norm(grad), p* GammaB * np.linalg.norm(bgrad))\n",
    "        if nxs[0] != new_str and auc[t_c] > save_threshold:\n",
    "            new_str = nxs[0]\n",
    "            bleu = mybleu.BLEU([new_str], [[ref]])[1]\n",
    "            sleu = mybleu.BLEU([new_str], [[o_str]])[1]\n",
    "            noun_cnt = 0\n",
    "            #print(auc[t_c])\n",
    "            for w in new_str.split():\n",
    "                if w in nouns:\n",
    "                    noun_cnt += 1\n",
    "            outputs_list.append((new_str, bleu, sleu, auc[t_c], noun_cnt, cnt))\n",
    "        if (auc[t_c] > AUC_threshold) or (cnt > 100):\n",
    "            break\n",
    "        \n",
    "    #print('%.3f' % (time.time()-tic))\n",
    "    return outputs_list\n",
    "\n",
    "def style_transfer_content_it(o_str, o_c, ref, nouns, Gamma=0.1, GammaB=0.01, AUC_threshold=0.98, save_threshold=0.7, content_type='1'):\n",
    "    \"\"\"Iterative variant: move z in place and recompute both gradients after every step.\"\"\"\n",
    "    tic = time.time()\n",
    "    #print('o_str', o_str)\n",
    "    nouns = set([t[0] for t in nouns])\n",
    "    if content_type == '1':\n",
    "        content = o_str\n",
    "    elif content_type == '2':\n",
    "        content = ' '.join(list(nouns))\n",
    "    else:\n",
    "        content = o_str + ' ' + ' '.join(list(nouns))\n",
    "    #print('[content]', content)\n",
    "    t_c = int(1 - np.argmax(o_c))\n",
    "    new_str = ''\n",
    "    outputs_list = []\n",
    "    z, grad, bgrad = model.xTozmu_gradBowCmu(o_str, t_c, content)\n",
    "    p = 1\n",
    "    cnt = 0\n",
    "    while (True):\n",
    "        z = z - p * (Gamma * grad[0] + GammaB * bgrad[0])\n",
    "        nxs, cs, auc = model.zTox(z)\n",
    "        cnt += 1\n",
    "        #print(nxs[0], '%.2f' % auc[t_c], p * Gamma * np.linalg.norm(grad), p* GammaB * np.linalg.norm(bgrad))\n",
    "        if nxs[0] != new_str and auc[t_c] > save_threshold:\n",
    "            new_str = nxs[0]\n",
    "            bleu = mybleu.BLEU([new_str], [[ref]])[1]\n",
    "            sleu = mybleu.BLEU([new_str], [[o_str]])[1]\n",
    "            noun_cnt = 0\n",
    "            #print(auc[t_c])\n",
    "            for w in new_str.split():\n",
    "                if w in nouns:\n",
    "                    noun_cnt += 1\n",
    "            outputs_list.append((new_str, bleu, sleu, auc[t_c], noun_cnt, cnt))\n",
    "        if (auc[t_c] > AUC_threshold) or (cnt > 100):\n",
    "            break\n",
    "        grad, bgrad = model.zmu_gradBowCmu(z, t_c, content)\n",
    "    #print('%.3f' % (time.time()-tic))\n",
    "    return outputs_list\n",
    "\n",
    "def style_transfer_it(o_str, o_c, ref, nouns, Gamma, AUC_threshold=0.99, save_threshold=0.8):\n",
    "    \"\"\"Iterative variant of style_transfer: recompute the attribute gradient each step.\"\"\"\n",
    "    tic = time.time()\n",
    "    nouns = set([t[0] for t in nouns])\n",
    "    t_c = int(1 - np.argmax(o_c))\n",
    "    new_str = ''\n",
    "    outputs_list = []\n",
    "    z, grad = model.xTozmu_gradmu(o_str, t_c)\n",
    "    p = 1\n",
    "    cnt = 0\n",
    "    while (True):\n",
    "        z = z - p * Gamma * grad[0]\n",
    "        nxs, cs, auc = model.zTox(z)\n",
    "        cnt += 1\n",
    "        #print(nxs[0], '%.2f' % auc[t_c], np.linalg.norm(grad))\n",
    "        if nxs[0] != new_str and auc[t_c] > save_threshold:\n",
    "            new_str = nxs[0]\n",
    "            bleu = mybleu.BLEU([new_str], [[ref]])[1]\n",
    "            sleu = mybleu.BLEU([new_str], [[o_str]])[1]\n",
    "            noun_cnt = 0\n",
    "            #print(auc[t_c])\n",
    "            for w in new_str.split():\n",
    "                if w in nouns:\n",
    "                    noun_cnt += 1\n",
    "            outputs_list.append((new_str, bleu, sleu, auc[t_c], noun_cnt, cnt))\n",
    "        if (auc[t_c] > AUC_threshold) or (cnt > 100):\n",
    "            break\n",
    "        grad = model.zmu_gradCmu(z, t_c)\n",
    "    #print('%.3f' % (time.time()-tic))\n",
    "    return outputs_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run style transfer over all 1000 evaluation sentences for every\n",
    "# (GammaB, content_type) combination, dumping intermediate results every 100\n",
    "# sentences. Content_type '0' uses the attribute gradient only; other values\n",
    "# add the content gradient (see style_transfer_content*).\n",
    "is_inference = True\n",
    "content_type_list = ['0','1', '2']\n",
    "GAMMA = 0.1\n",
    "GAMMAB_list = [0.1, 0.05]\n",
    "AUC_threshold = 0.901\n",
    "Save_threshold = 0.90\n",
    "Is_it = False \n",
    "Model_IT = 10\n",
    "model.restore('Model/Amazon/VAEGS-BOW-04-3stage/model-%d' % Model_IT)\n",
    "if is_inference:\n",
    "    Model_basic_name = 'VAEGS-BOW-04-3stage'\n",
    "    for GAMMAB in GAMMAB_list:\n",
    "        for Content_type in content_type_list:\n",
    "            print('Results/Amazon/res_%s-model%d--GC%sThre%sIt%sCont%s.pkl' % (Model_basic_name, Model_IT, GAMMAB, Save_threshold, Is_it, Content_type))\n",
    "            tic = time.time()\n",
    "            res = []\n",
    "            for i in range(1000):\n",
    "                if Content_type == '0':\n",
    "                    if Is_it:\n",
    "                        outputs_list = style_transfer_it(original[i], C_original[i], reference[i], original_noun[i], GAMMA, AUC_threshold, Save_threshold)\n",
    "                    else:\n",
    "                        outputs_list = style_transfer(original[i], C_original[i], reference[i], original_noun[i], GAMMA, AUC_threshold, Save_threshold)\n",
    "                else:\n",
    "                    if Is_it:\n",
    "                        outputs_list = style_transfer_content_it(original[i], C_original[i], reference[i], original_noun[i], \n",
    "                                                                 GAMMA, GAMMAB, AUC_threshold, Save_threshold,Content_type)\n",
    "                    else:\n",
    "                        outputs_list = style_transfer_content(original[i], C_original[i], reference[i], original_noun[i], \n",
    "                                                              GAMMA, GAMMAB, AUC_threshold, Save_threshold, Content_type)\n",
    "                # If no candidate ever passed save_threshold, fall back to the source sentence\n",
    "                if len(outputs_list) == 0:\n",
    "                    outputs_list.append((original[i], 0., 0., 0., 0, 100))\n",
    "                res.append(outputs_list)\n",
    "                if (i % 100) == 0:\n",
    "                    pickle.dump(res, open('Results/Amazon/res_%s-model%d--GC%sThre%sIt%sCont%s.pkl' % (Model_basic_name, Model_IT, GAMMAB, Save_threshold, Is_it, Content_type),'wb'))\n",
    "                    print('%d %.4f' % (i, time.time() - tic))\n",
    "\n",
    "            pickle.dump(res, open('Results/Amazon/res_%s-model%d--GC%sThre%sIt%sCont%s.pkl' % (Model_basic_name, Model_IT, GAMMAB, Save_threshold, Is_it, Content_type),'wb'))\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Evaluation"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Init BiLSTM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:36:13.283014Z",
     "start_time": "2019-05-15T12:36:02.216715Z"
    }
   },
   "outputs": [],
   "source": [
    "# Build the BiLSTM evaluation classifier B (with its own vocabulary w2id_all)\n",
    "# and restore pre-trained weights; it is used in the ACC section below to\n",
    "# judge whether a transferred sentence's predicted class flipped.\n",
    "from textBiLSTM import *\n",
    "\n",
    "BATCH_SIZE = 256\n",
    "NUM_EPOCH = 30\n",
    "train_dir ='Model/Amazon/TextBiLSTM/'\n",
    "#MAX_LENGTH = 16\n",
    "import pickle\n",
    "w2id_all, id2w_all, X_indices_all, C_labels_all = pickle.load(open('/workspace/Data/amazon/w2id_id2w_indices_labels_all.pkl','rb'))\n",
    "\n",
    "\n",
    "\n",
    "bilstm_dp = BiLSTM_DP(X_indices_all, C_labels_all, w2id_all,  BATCH_SIZE, n_epoch=NUM_EPOCH, test_data=None)\n",
    "\n",
    "g_bilstm = tf.Graph()\n",
    "sess_bilstm = tf.Session(graph=g_bilstm, config=sess_conf) \n",
    "with sess_bilstm.as_default():\n",
    "    with sess_bilstm.graph.as_default():\n",
    "        B = BiLSTM(\n",
    "            dp = bilstm_dp,\n",
    "            rnn_size = 512,\n",
    "            n_layers = 1,\n",
    "            encoder_embedding_dim = 256,\n",
    "            cell_type = 'lstm',\n",
    "            num_classes = 2,\n",
    "            sess=sess_bilstm\n",
    "        )\n",
    "B.restore('Model/Amazon/TextBiLSTM-appendix/model-14')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Init LM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:36:13.302025Z",
     "start_time": "2019-05-15T12:36:13.286965Z"
    }
   },
   "outputs": [],
   "source": [
    "# KenLM n-gram language model; used below to compute sentence perplexity (PPL).\n",
    "import kenlm\n",
    "lm = kenlm.Model('/workspace/Moses/YELP_lm/amazon.blm')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Cal PPL"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:50:37.613545Z",
     "start_time": "2019-05-15T12:50:37.598769Z"
    }
   },
   "outputs": [],
   "source": [
    "import os\n",
    "# Collect (and echo) the result pickles res_*.pkl sitting directly under\n",
    "# Results/Amazon/ -- the break keeps os.walk from descending into subfolders.\n",
    "file_name_list = []\n",
    "for _dirpath, _subdirs, filenames in os.walk('Results/Amazon/'):\n",
    "    for candidate in filenames:\n",
    "        if 'res_' in candidate and '.pkl' in candidate:\n",
    "            file_name_list.append(candidate)\n",
    "            print(candidate)\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:50:38.971651Z",
     "start_time": "2019-05-15T12:50:37.973554Z"
    }
   },
   "outputs": [],
   "source": [
    "# Perplexity of each result file under the KenLM model, taking either the\n",
    "# first or the last saved candidate per sentence.\n",
    "# name2ppl[name]['first'/'last'] = (mean PPL over sentences 500:,\n",
    "#                                   mean PPL over sentences :500, full list).\n",
    "name2ppl = dict()\n",
    "str2ppl = dict()  # cache: sentence -> perplexity (shared across files)\n",
    "for name in file_name_list:\n",
    "    # FIX: close the file handle instead of leaking it via a bare open()\n",
    "    with open('Results/Amazon/' + name, 'rb') as fin:\n",
    "        res = pickle.load(fin)\n",
    "\n",
    "    name2ppl[name] = dict()\n",
    "    for is_choose_first in [True, False]:\n",
    "        if is_choose_first:\n",
    "            print('first_%s' % name, len(res))\n",
    "        else:\n",
    "            print('last_%s' % name, len(res))\n",
    "\n",
    "        ppl_list = []\n",
    "        str_list = []\n",
    "        assert len(res) == 1000\n",
    "        for i, t in enumerate(res):\n",
    "            if is_choose_first:\n",
    "                str_list.append(t[0][0])\n",
    "            else:\n",
    "                str_list.append(t[-1][0])\n",
    "        assert len(str_list) == 1000\n",
    "        for s in str_list:\n",
    "            str2ppl[s] = lm.perplexity(s)\n",
    "            ppl_list.append(str2ppl[s])\n",
    "        if is_choose_first:\n",
    "            name2ppl[name]['first'] = (np.mean(ppl_list[500:]), np.mean(ppl_list[:500]), ppl_list)\n",
    "        else:\n",
    "            name2ppl[name]['last'] = (np.mean(ppl_list[500:]), np.mean(ppl_list[:500]), ppl_list)\n",
    "# FIX: make sure the output directory exists before dumping (a missing\n",
    "# Results/Amazon/metrics/ would crash here after all the LM work is done)\n",
    "os.makedirs('Results/Amazon/metrics', exist_ok=True)\n",
    "with open('Results/Amazon/metrics/name2ppl.pkl', 'wb') as fout:\n",
    "    pickle.dump(name2ppl, fout)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Cal ACC"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:50:55.552718Z",
     "start_time": "2019-05-15T12:50:41.888502Z"
    }
   },
   "outputs": [],
   "source": [
    "# Transfer accuracy: a sentence counts as a success when the BiLSTM classifier\n",
    "# assigns it a class different from its original label in C_original.\n",
    "name2acc = dict()\n",
    "for name in file_name_list:\n",
    "    # FIX: close the file handle instead of leaking it via a bare open()\n",
    "    with open('Results/Amazon/' + name, 'rb') as fin:\n",
    "        res = pickle.load(fin)\n",
    "\n",
    "    print(name)\n",
    "    name2acc[name] = dict()\n",
    "    for is_choose_first in [True, False]:\n",
    "        if is_choose_first:\n",
    "            print('first_%s' % name, len(res))\n",
    "        else:\n",
    "            # FIX: label now matches the PPL cell ('last_%s'); the old format\n",
    "            # appended an extra '.pkl' to a name already ending in '.pkl'\n",
    "            print('last_%s' % name, len(res))\n",
    "        str_list = []\n",
    "        acc_list = []\n",
    "        assert len(res) == 1000\n",
    "        for i, t in enumerate(res):\n",
    "            if is_choose_first:\n",
    "                str_list.append(t[0][0])\n",
    "            else:\n",
    "                str_list.append(t[-1][0])\n",
    "        assert len(str_list) == 1000\n",
    "\n",
    "        # Map through the classifier's vocabulary; unknown words fall back to <UNK>\n",
    "        idx_list = [[w2id_all.get(idx, w2id_all['<UNK>']) for idx in s.split()] + [w2id_all['<EOS>']] for s in str_list]\n",
    "        pad_x, length_list = pad(idx_list, w2id_all['<PAD>'],move_go=False)\n",
    "        res_class = B.sess.run(B.predictions, {B.input_x: pad_x, \n",
    "                                               B.X_seq_len:length_list,\n",
    "                                               B.output_keep_prob:1.0,\n",
    "                                               B.input_keep_prob:1.0})\n",
    "        acc_cnt = 0\n",
    "        for i in range(1000):\n",
    "            # success == predicted class differs from the sentence's original label\n",
    "            if res_class[i] != np.argmax(C_original[i]):\n",
    "                acc_cnt += 1.\n",
    "                acc_list.append(1.)\n",
    "            else:\n",
    "                acc_list.append(0.)\n",
    "        acc = acc_cnt / 1000\n",
    "        assert len(acc_list) == 1000\n",
    "        print('Acc: ', acc, np.mean(acc_list))\n",
    "\n",
    "        if is_choose_first:\n",
    "            name2acc[name]['first'] = (acc, acc_list)\n",
    "        else:\n",
    "            name2acc[name]['last'] = (acc, acc_list)\n",
    "\n",
    "# FIX: ensure the metrics directory exists before dumping\n",
    "os.makedirs('Results/Amazon/metrics', exist_ok=True)\n",
    "with open('Results/Amazon/metrics/name2acc.pkl', 'wb') as fout:\n",
    "    pickle.dump(name2acc, fout)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Ablation Study"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T13:03:39.815847Z",
     "start_time": "2019-05-15T13:03:12.542981Z"
    }
   },
   "outputs": [],
   "source": [
    "# For each result file, look up the precomputed PPL / ACC metrics, compute\n",
    "# per-sentence content and success scores, and print a LaTeX-style table row.\n",
    "name2ppl = pickle.load(open('Results/Amazon/metrics/name2ppl.pkl','rb'))\n",
    "name2acc = pickle.load(open('Results/Amazon/metrics/name2acc.pkl','rb'))\n",
    "baselinestr2ppl = pickle.load(open('Results/Amazon/baselines/str2ppl.pkl','rb'))\n",
    "baselines_str = pickle.load(open('Results/Amazon/baselines/baselines_str.pkl','rb'))\n",
    "name2res = dict()\n",
    "name2content = dict()\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Amazon/' + name, 'rb'))\n",
    "\n",
    "    name2res[name] = dict()\n",
    "    # Score both the first and the last candidate kept for each sample.\n",
    "    for is_choose_first in [True, False]:\n",
    "        Mres = []\n",
    "        bleu_list = []\n",
    "        sleu_list = []\n",
    "        str_list = []  # BUGFIX: was never initialized here; relied on stale kernel state\n",
    "        max_it_list = []\n",
    "        content_list = []  # FIXME: never appended to below, so its mean is nan -- confirm intent\n",
    "        content_pc_list = []\n",
    "        succ_list = []\n",
    "        word_overlap_list = []\n",
    "        edit_distance_list = []\n",
    "        if is_choose_first:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]['first']\n",
    "            acc, acc_list = name2acc[name]['first']\n",
    "        else:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]['last']\n",
    "            acc, acc_list = name2acc[name]['last']\n",
    "        for i,o in enumerate(res):\n",
    "            succ = 1\n",
    "            nouns = set([t[0] for t in original_noun[i]])\n",
    "\n",
    "            # Older result files store 5-tuples without the max-iteration field.\n",
    "            if is_choose_first:\n",
    "                if len(o[0]) == 6:\n",
    "                    s, bleu, sleu, auc, cnt, maxit = o[0]\n",
    "                else:\n",
    "                    s, bleu, sleu, auc, cnt = o[0]\n",
    "                    maxit = 0\n",
    "            else:\n",
    "                # BUGFIX: check the tuple actually unpacked (o[-1]), not o[0].\n",
    "                if len(o[-1]) == 6:\n",
    "                    s, bleu, sleu, auc, cnt, maxit = o[-1]\n",
    "                else:\n",
    "                    s, bleu, sleu, auc, cnt = o[-1]\n",
    "                    maxit = 0\n",
    "            bleu_list.append(bleu)\n",
    "            sleu_list.append(sleu)\n",
    "            str_list.append(s)\n",
    "            max_it_list.append(maxit)\n",
    "            word_overlap, edit_distance = word_overlap_edit(s, original[i])\n",
    "            word_overlap_list.append(word_overlap)\n",
    "            edit_distance_list.append(edit_distance)\n",
    "\n",
    "            # Content preservation: fraction of the source nouns still present in s.\n",
    "            if len(original_noun[i]) == 0:\n",
    "                content_cp = 1.\n",
    "                cnt = 1.\n",
    "            else:\n",
    "                cnt = 0\n",
    "                for n in nouns:\n",
    "                    if n in s:\n",
    "                        cnt += 1\n",
    "                content_cp = min(1, float(cnt) / len(nouns))\n",
    "            content_pc_list.append(content_cp)\n",
    "            # Success: fluent (PPL within 2x of the human reference), keeps at\n",
    "            # least one noun, and flips the classifier label.\n",
    "            if ppl_list[i] > baselinestr2ppl[baselines_str['human'][i]] * 2 or cnt < 1 or acc_list[i] < 1:\n",
    "                succ = 0\n",
    "            succ_list.append(succ)\n",
    "            # BUGFIX: was the undefined name content_pc; the computed value is content_cp.\n",
    "            Mres.append((s, bleu, sleu, ppl_list[i], acc_list[i], auc, cnt, content_cp, succ, word_overlap, edit_distance))\n",
    "\n",
    "        if is_choose_first:\n",
    "            name2content[name] = np.mean(content_list) * 100\n",
    "            print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f \\\\\\\\' % (name, np.mean(acc_list) * 100, np.mean([ppl_neg, ppl_pos]), np.mean(word_overlap_list) * 100, np.mean(content_pc_list) * 100,\n",
    "                                                                 np.mean(bleu_list) * 100, np.mean(succ_list) * 100))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Baselines"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:36:23.456151Z",
     "start_time": "2019-05-15T12:36:23.445134Z"
    }
   },
   "outputs": [],
   "source": [
    "import pickle\n",
    "import numpy as np\n",
    "# Load each baseline system's output sentences (dict keyed by system name).\n",
    "baselines_str = pickle.load(open('Results/Amazon/baselines/baselines_str.pkl','rb'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:36:43.134986Z",
     "start_time": "2019-05-15T12:36:43.127124Z"
    }
   },
   "outputs": [],
   "source": [
    "# Map internal result-file keys to the human-readable system names used in tables.\n",
    "name2name = {\n",
    "    'mit': 'CrossAligned',\n",
    "    'fader': 'StyleEmbedding',\n",
    "    'label': 'DeleteOnly',\n",
    "    'orgin': 'DeleteAndRetrieve',\n",
    "    'rule_base': 'TemplateBased',\n",
    "    'retrieval': 'RetrievalOnly',\n",
    "    'human': 'Human',\n",
    "    'multi_decoder': 'MultiDecoder',\n",
    "    'bt': 'BackTranslation',\n",
    "    'org': 'Original',\n",
    "}\n",
    "# Also score the untouched source sentences as an 'Original' pseudo-baseline.\n",
    "baselines_str['org'] = original"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## PPL"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:36:48.550195Z",
     "start_time": "2019-05-15T12:36:48.452453Z"
    }
   },
   "outputs": [],
   "source": [
    "# Compute perplexity of every baseline sentence under the language model `lm`,\n",
    "# caching the per-string value in str2ppl for reuse by later cells.\n",
    "baseline_name2ppl = dict()\n",
    "str2ppl = dict()\n",
    "\n",
    "for name in baselines_str:\n",
    "    ppl_list = []\n",
    "    str_list = baselines_str[name]\n",
    "\n",
    "    for s in str_list:\n",
    "        str2ppl[s] = lm.perplexity(s)\n",
    "        ppl_list.append(str2ppl[s])\n",
    "    # NOTE(review): assumes each system's list is ordered with a fixed 500/500\n",
    "    # split between the two sentiment classes -- confirm against how\n",
    "    # baselines_str was built.\n",
    "    ppl_neg = np.mean(ppl_list[500:])   \n",
    "    ppl_pos = np.mean(ppl_list[:500]) \n",
    "    \n",
    "    # Store (overall mean, negative-half mean, positive-half mean) per system.\n",
    "    baseline_name2ppl[name2name[name]] = (np.mean([ppl_neg, ppl_pos]), ppl_neg, ppl_pos)\n",
    "    \n",
    "pickle.dump(baseline_name2ppl, open('Results/Amazon/baselines/name2PPL_res.pkl','wb'))\n",
    "pickle.dump(str2ppl, open('Results/Amazon/baselines/str2ppl.pkl','wb'))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## ACC"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:36:52.711261Z",
     "start_time": "2019-05-15T12:36:51.370683Z"
    }
   },
   "outputs": [],
   "source": [
    "# Classify every baseline sentence with the style classifier B and cache\n",
    "# the predicted label per string (used later to score transfer accuracy).\n",
    "str2class = dict()\n",
    "for name in baselines_str:\n",
    "    str_list = baselines_str[name]\n",
    "    print(name, len(str_list))\n",
    "    # Map tokens to ids (<UNK> for OOV) and append <EOS>.\n",
    "    idx_list = [[w2id_all.get(idx, w2id_all['<UNK>']) for idx in s.split()] + [w2id_all['<EOS>']] for s in str_list]\n",
    "    pad_x, length_list = pad(idx_list, w2id_all['<PAD>'],move_go=False)\n",
    "    # keep_prob = 1.0 disables dropout at inference time.\n",
    "    res_class = B.sess.run(B.predictions, {B.input_x: pad_x, \n",
    "                                           B.X_seq_len:length_list,\n",
    "                                           B.output_keep_prob:1.0,\n",
    "                                           B.input_keep_prob:1.0})\n",
    "    # (Removed unused local acc_cnt -- it was assigned but never read.)\n",
    "    for i,s in enumerate(str_list):\n",
    "        str2class[s] = res_class[i]\n",
    "\n",
    "print(len(str2class))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Show"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:42:04.686406Z",
     "start_time": "2019-05-15T12:41:58.426270Z"
    }
   },
   "outputs": [],
   "source": [
    "import Util.mybleu\n",
    "\n",
    "# Score every baseline sentence: BLEU vs. the human reference, self-BLEU vs.\n",
    "# the source, LM perplexity, classifier flip, noun retention, and success.\n",
    "baselines_ = dict()\n",
    "\n",
    "for name in baselines_str:\n",
    "    print(name)\n",
    "    baselines_[name] = []\n",
    "    for i,s in enumerate(baselines_str[name]):\n",
    "        succ = 1\n",
    "        nouns = set([t[0] for t in original_noun[i]])\n",
    "        bleu = mybleu.BLEU([s], [[reference[i]]])[1]\n",
    "        sleu = mybleu.BLEU([s], [[original[i]]])[1]\n",
    "        pred = str2class[s]\n",
    "        ppl = str2ppl[s]\n",
    "        word_overlap, edit_distance = word_overlap_edit(s, original[i])\n",
    "\n",
    "        # Transfer accuracy: 1 when the predicted label differs from the source label.\n",
    "        if pred != np.argmax(C_original[i]):\n",
    "            acc = 1.\n",
    "        else:\n",
    "            acc = 0.\n",
    "\n",
    "        # Content preservation: fraction of source nouns still present in s.\n",
    "        # (Removed a token-level count that used to sit above pred = ...: it was\n",
    "        # dead code, unconditionally overwritten by the cnt assignments below.)\n",
    "        if len(original_noun[i]) == 0:\n",
    "            content_cp = 1.\n",
    "            cnt = 1.\n",
    "        else:\n",
    "            cnt = 0\n",
    "            for n in nouns:\n",
    "                if n in s:\n",
    "                    cnt += 1\n",
    "            content_cp = min(1, float(cnt) / len(nouns))\n",
    "        auc = 0.\n",
    "        # Success: fluent (PPL within 2x of human), keeps >= 1 noun, flips the label.\n",
    "        if ppl > str2ppl[baselines_str['human'][i]] * 2 or cnt < 1 or acc < 1:\n",
    "            succ = 0\n",
    "        baselines_[name].append((s, bleu, sleu, ppl, acc, pred, auc, cnt, content_cp, succ, word_overlap, edit_distance))\n",
    "    print(len(baselines_[name]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-15T12:42:04.720236Z",
     "start_time": "2019-05-15T12:42:04.688854Z"
    }
   },
   "outputs": [],
   "source": [
    "# Aggregate the per-sentence baseline tuples into corpus-level numbers and\n",
    "# print one LaTeX-style table row per baseline system.\n",
    "for name in baselines_:\n",
    "    accs, bleus, sleus = [], [], []\n",
    "    contents, content_pcs = [], []\n",
    "    edit_dists, succs, overlaps, ppls = [], [], [], []\n",
    "    for record in baselines_[name]:\n",
    "        s, bleu, sleu, ppl, acc, pred, auc, cnt, content_pc, succ, word_overlap, edit_distance = record\n",
    "        accs.append(acc)\n",
    "        ppls.append(ppl)\n",
    "        succs.append(succ)\n",
    "        bleus.append(bleu)\n",
    "        sleus.append(sleu)\n",
    "        contents.append(cnt)\n",
    "        content_pcs.append(content_pc)\n",
    "        overlaps.append(word_overlap)\n",
    "        edit_dists.append(edit_distance)\n",
    "    ppl, ppl_neg, ppl_pos = baseline_name2ppl[name2name[name]]\n",
    "    print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f \\\\\\\\' % (name2name[name], np.mean(accs) * 100, np.mean([ppl_neg, ppl_pos]), np.mean(overlaps) * 100, np.mean(content_pcs) * 100,\n",
    "                                                                 np.mean(bleus) * 100, np.mean(succs) * 100))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {
    "height": "calc(100% - 180px)",
    "left": "10px",
    "top": "150px",
    "width": "244px"
   },
   "toc_section_display": true,
   "toc_window_display": true
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
