{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# prototype constrained decoding with INMT models\n",
    "\n",
    "# build the simplest possible interface to a trained NMT model\n",
    "# define the payload for NMT hypotheses\n",
    "\n",
    "# the interface between NMT and constrained decoding needs to know how to create ConstrainedHypothesis objects\n",
    "\n",
    "import copy\n",
    "\n",
    "import numpy as np\n",
    "from collections import defaultdict, OrderedDict\n",
    "\n",
    "from constrained_decoding import ConstraintHypothesis\n",
    "from constrained_decoding.translation_model import AbstractConstrainedTM\n",
    "\n",
    "from nn_imt import IMTPredictor\n",
    "from neural_mt.machine_translation.configurations import get_config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): hardcoded absolute path -- update for your environment\n",
    "IMT_CONFIGURATION_FILE = '/home/chris/projects/neural_imt/experiments/configs/demos/en-de/en-de_interactive_demo.yaml'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class NeuralTranslationModel(AbstractConstrainedTM):\n",
    "    \"\"\"Adapter exposing a trained interactive NMT (IMT) model through the\n",
    "    constrained-decoding interface (AbstractConstrainedTM).\n",
    "\n",
    "    The heavy lifting (graph construction, parameter loading, theano\n",
    "    functions) lives in IMTPredictor; this class only packages model state\n",
    "    into ConstraintHypothesis objects for the generic constrained decoder.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, config_file):\n",
    "        \"\"\"Initialize the model according to a user-provided configuration.\n",
    "\n",
    "        - follow the style of BeamSearch, but remove the search logic\n",
    "        - build the graph and load the parameters (i.e. create a Predictor\n",
    "          and expose the right functions)\n",
    "        \"\"\"\n",
    "        self.imt_model = IMTPredictor(get_config(config_file))\n",
    "        self.imt_beam_search = self.imt_model.beam_search\n",
    "        # token the model emits to mark end-of-sequence\n",
    "        self.eos_token = u'</S>'\n",
    "\n",
    "    def build_input_representations(self, source_tokens, constraint_token_seqs):\n",
    "        \"\"\"Encode the input sequences using the source and target word-->idx maps.\n",
    "\n",
    "        Returns (source_seq, constraint_seqs): source_seq is a (1, src_len)\n",
    "        array of source-vocab indices; constraint_seqs is a list of\n",
    "        target-vocab index sequences, one per constraint.\n",
    "        \"\"\"\n",
    "        # TODO: add tokenization, subword encoding\n",
    "        source_seq = self.imt_model.map_idx_or_unk(source_tokens,\n",
    "                                                   self.imt_model.src_vocab,\n",
    "                                                   self.imt_model.unk_idx)\n",
    "\n",
    "        # Note: we assume that constraints are in the target language\n",
    "        constraint_seqs = []\n",
    "        for token_seq in constraint_token_seqs:\n",
    "            token_idxs = self.imt_model.map_idx_or_unk(token_seq,\n",
    "                                                       self.imt_model.trg_vocab,\n",
    "                                                       self.imt_model.unk_idx)\n",
    "            constraint_seqs.append(token_idxs)\n",
    "\n",
    "        # add a batch dimension of size 1\n",
    "        source_seq = np.tile(source_seq, (1, 1))\n",
    "\n",
    "        # TODO: we'll need to tile constraint_seqs up to beam_size for NMT models that take constraints as inputs\n",
    "        # input_ = numpy.tile(seq, (self.exp_config['beam_size'], 1))\n",
    "\n",
    "        return (source_seq, constraint_seqs)\n",
    "\n",
    "    # TODO: remove target_prefix from args (see below)\n",
    "    def start_hypothesis(self, source_seq, target_prefix, constraints, coverage=None):\n",
    "        \"\"\"Build the start hyp for a neural translation model.\n",
    "\n",
    "        Models may or may not use constraints, i.e. by modeling the\n",
    "        probability of generating vs. copying from the constraints.\n",
    "\n",
    "        If `coverage` is None it is initialized to all-zeros (nothing covered\n",
    "        yet); previously this argument was silently ignored and overwritten.\n",
    "        \"\"\"\n",
    "        # TODO: there SHOULD BE no self.target_sampling_input because we don't use the prefix representation in constrained\n",
    "        # input_values = {:class:`~theano.Variable`: :class:`~numpy.ndarray`}\n",
    "        input_values = {\n",
    "            self.imt_model.source_sampling_input: source_seq,\n",
    "            self.imt_model.target_sampling_input: target_prefix\n",
    "        }\n",
    "\n",
    "        # Note that the initial input of an NMT model is currently implicit (i.e. Readout.initial_input)\n",
    "        contexts, states, beam_size = self.imt_beam_search.compute_initial_states_and_contexts(inputs=input_values)\n",
    "\n",
    "        # explicit initialization of coverage: one 0/1 vector per constraint\n",
    "        if coverage is None:\n",
    "            coverage = [np.zeros(len(s), dtype='int16') for s in constraints]\n",
    "\n",
    "        # the payload contains everything that the next timestep will need to generate another output\n",
    "        payload = {\n",
    "            'contexts': contexts,\n",
    "            'states': states,\n",
    "            # input_values is here because of a bug in getting beam-size from the graph\n",
    "            'input_values': input_values\n",
    "        }\n",
    "\n",
    "        start_hyp = ConstraintHypothesis(\n",
    "            token=None,\n",
    "            score=None,\n",
    "            coverage=coverage,\n",
    "            constraints=constraints,\n",
    "            payload=payload,\n",
    "            backpointer=None,\n",
    "            constraint_index=None,\n",
    "            unfinished_constraint=False\n",
    "        )\n",
    "\n",
    "        return start_hyp\n",
    "\n",
    "    def generate(self, hyp, n_best):\n",
    "        \"\"\"Return up to `n_best` unconstrained continuations of `hyp`.\n",
    "\n",
    "        Note: the `n_best` parameter here is only used to limit the number of\n",
    "        hypothesis objects that are generated from the input hyp; the beam\n",
    "        implementation may specify a different `n_best`.\n",
    "        \"\"\"\n",
    "        # if we already generated EOS, there's only one option -- just continue it and copy the cost\n",
    "        if hyp.token == self.eos_token:\n",
    "            new_hyp = ConstraintHypothesis(\n",
    "                token=self.eos_token,\n",
    "                score=hyp.score,\n",
    "                coverage=copy.deepcopy(hyp.coverage),\n",
    "                constraints=hyp.constraints,\n",
    "                payload=hyp.payload,\n",
    "                backpointer=hyp,\n",
    "                constraint_index=None,\n",
    "                unfinished_constraint=False\n",
    "            )\n",
    "            return [new_hyp]\n",
    "\n",
    "        logprobs = self.imt_beam_search.compute_logprobs(hyp.payload['input_values'],\n",
    "                                                         hyp.payload['contexts'],\n",
    "                                                         hyp.payload['states'])\n",
    "\n",
    "        assert len(logprobs) == 1, 'NMT logprob logic depends upon logprobs only having one row'\n",
    "        # NOTE(review): ascending argsort assumes compute_logprobs returns costs\n",
    "        # (i.e. -log p, smaller is better) -- confirm against the search impl\n",
    "        n_best_outputs = np.argsort(logprobs.flatten())[:n_best]\n",
    "        chosen_costs = logprobs.flatten()[n_best_outputs]\n",
    "\n",
    "        payload = hyp.payload\n",
    "\n",
    "        # tile every part of the payload up to n_best so a single theano call\n",
    "        # can advance all chosen continuations at once\n",
    "        # Note: it's critical to use the OrderedDict here, otherwise args will get out of order in theano funcs\n",
    "        tiled_payload = defaultdict(OrderedDict)\n",
    "        # observed context shapes, e.g.: attended (src_len, 1, 2000), attended_mask (src_len, 1)\n",
    "        tiled_payload['contexts']['attended'] = np.tile(payload['contexts']['attended'], (1, n_best, 1))\n",
    "        tiled_payload['contexts']['attended_mask'] = np.tile(payload['contexts']['attended_mask'], (1, n_best))\n",
    "\n",
    "        # observed state shapes, e.g.: outputs (1,), states (1, 1000),\n",
    "        # weights (1, src_len), weighted_averages (1, 2000)\n",
    "        tiled_payload['states']['outputs'] = np.tile(payload['states']['outputs'], n_best)\n",
    "        tiled_payload['states']['states'] = np.tile(payload['states']['states'], (n_best, 1))\n",
    "        tiled_payload['states']['weights'] = np.tile(payload['states']['weights'], (n_best, 1))\n",
    "        tiled_payload['states']['weighted_averages'] = np.tile(payload['states']['weighted_averages'], (n_best, 1))\n",
    "\n",
    "        tiled_payload['input_values'][self.imt_model.source_sampling_input] = np.tile(payload['input_values'][self.imt_model.source_sampling_input],\n",
    "                                                                                      (n_best, 1))\n",
    "        tiled_payload['input_values'][self.imt_model.target_sampling_input] = np.tile(payload['input_values'][self.imt_model.target_sampling_input],\n",
    "                                                                                      (n_best, 1))\n",
    "\n",
    "        # advance all n_best continuations in one call\n",
    "        next_states = self.imt_beam_search.compute_next_states(tiled_payload['input_values'],\n",
    "                                                               tiled_payload['contexts'],\n",
    "                                                               tiled_payload['states'],\n",
    "                                                               n_best_outputs)\n",
    "\n",
    "        # create ConstrainedHypothesis objects from these states (tile back down to one)\n",
    "        new_hyps = []\n",
    "        for hyp_idx in range(n_best):\n",
    "            new_payload = defaultdict(OrderedDict)\n",
    "            # contexts and input_values are unchanged by a decoding step, so they are shared\n",
    "            new_payload['contexts'] = payload['contexts']\n",
    "\n",
    "            new_payload['states']['outputs'] = np.atleast_1d(next_states['outputs'][hyp_idx])\n",
    "            new_payload['states']['states'] = np.atleast_2d(next_states['states'][hyp_idx])\n",
    "            new_payload['states']['weights'] = np.atleast_2d(next_states['weights'][hyp_idx])\n",
    "            new_payload['states']['weighted_averages'] = np.atleast_2d(next_states['weighted_averages'][hyp_idx])\n",
    "\n",
    "            new_payload['input_values'] = hyp.payload['input_values']\n",
    "\n",
    "            # TODO: account for EOS continuations -- i.e. make other costs infinite\n",
    "            if hyp.score is not None:\n",
    "                next_score = hyp.score + chosen_costs[hyp_idx]\n",
    "            else:\n",
    "                # hyp.score is None for the start hyp\n",
    "                next_score = chosen_costs[hyp_idx]\n",
    "\n",
    "            new_hyp = ConstraintHypothesis(\n",
    "                token=self.imt_model.trg_ivocab[n_best_outputs[hyp_idx]],\n",
    "                score=next_score,\n",
    "                coverage=copy.deepcopy(hyp.coverage),\n",
    "                constraints=hyp.constraints,\n",
    "                payload=new_payload,\n",
    "                backpointer=hyp,\n",
    "                constraint_index=None,\n",
    "                unfinished_constraint=False\n",
    "            )\n",
    "\n",
    "            new_hyps.append(new_hyp)\n",
    "\n",
    "        return new_hyps\n",
    "\n",
    "    def generate_constrained(self, hyp):\n",
    "        \"\"\"Use hyp.constraints and hyp.coverage to return new hypotheses which\n",
    "        start constraints that are not yet covered by this hypothesis.\n",
    "        \"\"\"\n",
    "        assert hyp.unfinished_constraint is not True, 'hyp must not be part of an unfinished constraint'\n",
    "\n",
    "        new_constraint_hyps = []\n",
    "        available_constraints = hyp.constraint_candidates()\n",
    "\n",
    "        # TODO: if the model knows about constraints, getting the score from the model must be done differently\n",
    "        # TODO: currently, according to the model, there is no difference between generating and choosing from constraints\n",
    "        logprobs = self.imt_beam_search.compute_logprobs(hyp.payload['input_values'],\n",
    "                                                         hyp.payload['contexts'],\n",
    "                                                         hyp.payload['states']).flatten()\n",
    "        for idx in available_constraints:\n",
    "            # start a new constraint by forcing its first token\n",
    "            constraint_idx = hyp.constraints[idx][0]\n",
    "\n",
    "            next_states = self.imt_beam_search.compute_next_states(hyp.payload['input_values'],\n",
    "                                                                   hyp.payload['contexts'],\n",
    "                                                                   hyp.payload['states'],\n",
    "                                                                   np.atleast_1d(constraint_idx))\n",
    "\n",
    "            new_payload = defaultdict(OrderedDict)\n",
    "            new_payload['contexts'] = hyp.payload['contexts']\n",
    "            new_payload['states'] = next_states\n",
    "            new_payload['input_values'] = hyp.payload['input_values']\n",
    "\n",
    "            # get the score for this token from the logprobs\n",
    "            if hyp.score is not None:\n",
    "                next_score = hyp.score + logprobs[constraint_idx]\n",
    "            else:\n",
    "                # hyp.score is None for the start hyp\n",
    "                next_score = logprobs[constraint_idx]\n",
    "\n",
    "            # mark the first token of constraint `idx` as covered\n",
    "            coverage = copy.deepcopy(hyp.coverage)\n",
    "            coverage[idx][0] = 1\n",
    "\n",
    "            # multi-token constraints remain \"unfinished\" until fully emitted\n",
    "            if len(coverage[idx]) > 1:\n",
    "                unfinished_constraint = True\n",
    "            else:\n",
    "                unfinished_constraint = False\n",
    "\n",
    "            # TODO: if the model knows about constraints, getting the score from the model must be done differently\n",
    "            new_hyp = ConstraintHypothesis(token=self.imt_model.trg_ivocab[constraint_idx],\n",
    "                                           score=next_score,\n",
    "                                           coverage=coverage,\n",
    "                                           constraints=hyp.constraints,\n",
    "                                           payload=new_payload,\n",
    "                                           backpointer=hyp,\n",
    "                                           constraint_index=(idx, 0),\n",
    "                                           unfinished_constraint=unfinished_constraint\n",
    "                                          )\n",
    "            new_constraint_hyps.append(new_hyp)\n",
    "\n",
    "        return new_constraint_hyps\n",
    "\n",
    "    def continue_constrained(self, hyp):\n",
    "        \"\"\"Emit the next token of the (multi-token) constraint `hyp` is inside.\"\"\"\n",
    "        assert hyp.unfinished_constraint is True, 'hyp must be part of an unfinished constraint'\n",
    "\n",
    "        # TODO: if the model knows about constraints, getting the score from the model must be done differently\n",
    "        # TODO: currently, according to the model, there is no difference between generating and choosing from constraints\n",
    "        logprobs = self.imt_beam_search.compute_logprobs(hyp.payload['input_values'],\n",
    "                                                         hyp.payload['contexts'],\n",
    "                                                         hyp.payload['states']).flatten()\n",
    "\n",
    "        constraint_row_index = hyp.constraint_index[0]\n",
    "        # the index of the next token in the constraint\n",
    "        constraint_tok_index = hyp.constraint_index[1] + 1\n",
    "        constraint_index = (constraint_row_index, constraint_tok_index)\n",
    "\n",
    "        continued_constraint_token = hyp.constraints[constraint_index[0]][constraint_index[1]]\n",
    "\n",
    "        # get the score for this token from the logprobs\n",
    "        if hyp.score is not None:\n",
    "            next_score = hyp.score + logprobs[continued_constraint_token]\n",
    "        else:\n",
    "            # hyp.score is None for the start hyp\n",
    "            next_score = logprobs[continued_constraint_token]\n",
    "\n",
    "        coverage = copy.deepcopy(hyp.coverage)\n",
    "        coverage[constraint_row_index][constraint_tok_index] = 1\n",
    "\n",
    "        # still unfinished if more tokens of this constraint remain\n",
    "        if len(hyp.constraints[constraint_row_index]) > constraint_tok_index + 1:\n",
    "            unfinished_constraint = True\n",
    "        else:\n",
    "            unfinished_constraint = False\n",
    "\n",
    "        next_states = self.imt_beam_search.compute_next_states(hyp.payload['input_values'],\n",
    "                                                               hyp.payload['contexts'],\n",
    "                                                               hyp.payload['states'],\n",
    "                                                               np.atleast_1d(continued_constraint_token))\n",
    "\n",
    "        new_payload = defaultdict(OrderedDict)\n",
    "        new_payload['contexts'] = hyp.payload['contexts']\n",
    "        new_payload['states'] = next_states\n",
    "        new_payload['input_values'] = hyp.payload['input_values']\n",
    "\n",
    "        new_hyp = ConstraintHypothesis(token=self.imt_model.trg_ivocab[continued_constraint_token],\n",
    "                                       score=next_score,\n",
    "                                       coverage=coverage,\n",
    "                                       constraints=hyp.constraints,\n",
    "                                       payload=new_payload,\n",
    "                                       backpointer=hyp,\n",
    "                                       constraint_index=constraint_index,\n",
    "                                       unfinished_constraint=unfinished_constraint)\n",
    "\n",
    "        return new_hyp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# The mask serves several functions; chiefly, it marks the point at which a hypothesis has finished (i.e. started to emit <EOS>)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:blocks.bricks.recurrent:unknown input sequencegenerator_generate_target_prefix\n",
      "\n",
      "Your function uses a non-shared variable other than those given by scan explicitly. That can significantly slow down `tensor.grad` call. Did you forget to declare it in `contexts`?\n",
      "INFO:machine_translation.checkpoint:Note that the delimeter for parameter loading is currently hacked\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000,)        : /bidirectionalencoder/bidirectionalwmt15/backward.initial_state\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (2000,)        : /bidirectionalencoder/back_fork/fork_gate_inputs.b\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (300, 2000)    : /bidirectionalencoder/back_fork/fork_gate_inputs.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000,)        : /bidirectionalencoder/back_fork/fork_inputs.b\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (300, 1000)    : /bidirectionalencoder/back_fork/fork_inputs.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000,)        : /bidirectionalencoder/bidirectionalwmt15/forward.initial_state\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (2000,)        : /bidirectionalencoder/fwd_fork/fork_gate_inputs.b\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (300, 2000)    : /bidirectionalencoder/fwd_fork/fork_gate_inputs.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000,)        : /bidirectionalencoder/fwd_fork/fork_inputs.b\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (300, 1000)    : /bidirectionalencoder/fwd_fork/fork_inputs.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000,)        : /decoder/sequencegenerator/fork/fork_inputs.b\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (300, 1000)    : /decoder/sequencegenerator/fork/fork_inputs.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (2000,)        : /decoder/sequencegenerator/fork/fork_gate_inputs.b\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (300, 2000)    : /decoder/sequencegenerator/fork/fork_gate_inputs.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000,)        : /decoder/sequencegenerator/att_trans/attention/preprocess.b\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (2000, 1000)   : /decoder/sequencegenerator/att_trans/attention/preprocess.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000,)        : /decoder/sequencegenerator/att_trans/decoder/state_initializer/linear_0.b\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (80000, 300)   : /bidirectionalencoder/embeddings.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (90000, 300)   : /decoder/sequencegenerator/readout/lookupfeedbackwmt15/lookuptable.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000, 2000)   : /bidirectionalencoder/bidirectionalwmt15/forward.state_to_gates\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000, 1000)   : /bidirectionalencoder/bidirectionalwmt15/forward.state_to_state\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000, 2000)   : /bidirectionalencoder/bidirectionalwmt15/backward.state_to_gates\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000, 1000)   : /bidirectionalencoder/bidirectionalwmt15/backward.state_to_state\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000, 1000)   : /decoder/sequencegenerator/att_trans/decoder/state_initializer/linear_0.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000, 2000)   : /decoder/sequencegenerator/att_trans/decoder.state_to_gates\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000, 1000)   : /decoder/sequencegenerator/att_trans/attention/state_trans/transform_states.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000, 1)      : /decoder/sequencegenerator/att_trans/attention/energy_comp/linear.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (2000, 2000)   : /decoder/sequencegenerator/att_trans/distribute/fork_gate_inputs.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000, 1000)   : /decoder/sequencegenerator/att_trans/decoder.state_to_state\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (2000, 1000)   : /decoder/sequencegenerator/att_trans/distribute/fork_inputs.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000, 1000)   : /decoder/sequencegenerator/readout/merge/transform_states.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (300, 1000)    : /decoder/sequencegenerator/readout/merge/transform_feedback.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (2000, 1000)   : /decoder/sequencegenerator/readout/merge/transform_weighted_averages.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (1000,)        : /decoder/sequencegenerator/readout/initializablefeedforwardsequence/maxout_bias.b\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (500, 300)     : /decoder/sequencegenerator/readout/initializablefeedforwardsequence/softmax0.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (300, 90000)   : /decoder/sequencegenerator/readout/initializablefeedforwardsequence/softmax1.W\n",
      "INFO:machine_translation.checkpoint: Loaded to CG (90000,)       : /decoder/sequencegenerator/readout/initializablefeedforwardsequence/softmax1.b\n",
      "WARNING:machine_translation.checkpoint: Parameter does not exist: /decoder/sequencegenerator/initializablefeedforwardsequence/confidence_model1.W\n",
      "WARNING:machine_translation.checkpoint: Parameter does not exist: /decoder/sequencegenerator/initializablefeedforwardsequence/confidence_model1.b\n",
      "WARNING:machine_translation.checkpoint: Parameter does not exist: /decoder/sequencegenerator/initializablefeedforwardsequence/confidence_model2.W\n",
      "WARNING:machine_translation.checkpoint: Parameter does not exist: /decoder/sequencegenerator/initializablefeedforwardsequence/confidence_model2.b\n",
      "WARNING:machine_translation.checkpoint: Parameter does not exist: /decoder/sequencegenerator/initializablefeedforwardsequence/confidence_model3.W\n",
      "WARNING:machine_translation.checkpoint: Parameter does not exist: /decoder/sequencegenerator/initializablefeedforwardsequence/confidence_model3.b\n",
      "INFO:machine_translation.checkpoint: Number of parameters loaded for computation graph: 37\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss function is: cross_entropy\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/chris/projects/neural_imt/nn_imt/search/__init__.py:123: UserWarning: theano.function was asked to create a function computing outputs given certain inputs, but the provided input variable at index 0 is not part of the computational graph needed to compute the outputs: sampling_input.\n",
      "To make this warning into an error, you can pass the parameter on_unused_input='raise' to theano.function. To disable it completely, use on_unused_input='ignore'.\n",
      "  self.inputs + self.contexts + self.input_states + next_outputs, next_states, on_unused_input='warn')\n",
      "/home/chris/projects/neural_imt/nn_imt/search/__init__.py:123: UserWarning: theano.function was asked to create a function computing outputs given certain inputs, but the provided input variable at index 1 is not part of the computational graph needed to compute the outputs: sampling_target_prefix.\n",
      "To make this warning into an error, you can pass the parameter on_unused_input='raise' to theano.function. To disable it completely, use on_unused_input='ignore'.\n",
      "  self.inputs + self.contexts + self.input_states + next_outputs, next_states, on_unused_input='warn')\n",
      "/home/chris/projects/neural_imt/nn_imt/search/__init__.py:123: UserWarning: theano.function was asked to create a function computing outputs given certain inputs, but the provided input variable at index 5 is not part of the computational graph needed to compute the outputs: sequencegenerator_generate_outputs.\n",
      "To make this warning into an error, you can pass the parameter on_unused_input='raise' to theano.function. To disable it completely, use on_unused_input='ignore'.\n",
      "  self.inputs + self.contexts + self.input_states + next_outputs, next_states, on_unused_input='warn')\n"
     ]
    }
   ],
   "source": [
    "# build the constrained TM wrapper (loads model parameters -- see log output above)\n",
    "imt_tm = NeuralTranslationModel(IMT_CONFIGURATION_FILE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# build a toy source sentence and two single-token target-language constraints\n",
    "# Note: added the missing space before '</S>' so '.' and the EOS token are\n",
    "# separate tokens (previously '.</S>' was a single out-of-vocabulary token)\n",
    "source_input = u'<S> This is a test . </S>'.split()\n",
    "target_prefix = u'<S>'.split()\n",
    "constraint_2 = u'gewesen'.split()\n",
    "constraint_1 = u'wichtiger'.split()\n",
    "\n",
    "constraint_seq = [constraint_1, constraint_2]\n",
    "\n",
    "source_, constraints_ = imt_tm.build_input_representations(source_input, constraint_seq)\n",
    "\n",
    "# TODO: this is a hack until we remove the target_prefix completely from the graph\n",
    "target_prefix_ = imt_tm.imt_model.map_idx_or_unk(target_prefix,\n",
    "                                                 imt_tm.imt_model.trg_vocab,\n",
    "                                                 imt_tm.imt_model.unk_idx)\n",
    "\n",
    "# add a batch dimension of size 1\n",
    "target_prefix_ = np.tile(target_prefix_, (1, 1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# sanity check: build the start hypothesis for the toy example\n",
    "start_hyp = imt_tm.start_hypothesis(source_seq=source_, target_prefix=target_prefix_, \n",
    "                                    constraints=constraints_)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# try full constrained decoding\n",
    "from constrained_decoding import ConstrainedDecoder, Beam\n",
    "\n",
    "# the decoder is model-agnostic: it only needs these three hypothesis-expansion callbacks\n",
    "decoder = ConstrainedDecoder(hyp_generation_func=imt_tm.generate,\n",
    "                             constraint_generation_func=imt_tm.generate_constrained,\n",
    "                             continue_constraint_func=imt_tm.continue_constrained,\n",
    "                             beam_implementation=Beam)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# run the constrained search on the toy example; search_grid holds the beams produced during decoding\n",
    "search_grid = decoder.search(start_hyp=start_hyp, constraints=constraints_, max_hyp_len=10, beam_size=5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "import codecs\n",
    "\n",
    "# Experiment 1 -- load src + refs\n",
    "# Go through one PRIMT cycle\n",
    "# output\n",
    "#     - hyps for cycle 1\n",
    "#     - hyps for cycle 2\n",
    "# measure and compare performance before and after a PRIMT cycle\n",
    "source_file = '/home/chris/projects/constrained_decoding/proto/sample_data/newstest2013.en.bpe.tok'\n",
    "target_file = '/home/chris/projects/constrained_decoding/proto/sample_data/newstest2013.de.bpe.tok'\n",
    "\n",
    "def load_tokenized_segs(path):\n",
    "    \"\"\"Read a UTF-8 text file and return a list of token lists, one per line.\"\"\"\n",
    "    with codecs.open(path, encoding='utf8') as file_in:\n",
    "        return [line.strip().split() for line in file_in.read().strip().split('\\n')]\n",
    "\n",
    "source_segs = load_tokenized_segs(source_file)\n",
    "target_segs = load_tokenized_segs(target_file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "3000"
      ]
     },
     "execution_count": 12,
     "output_type": "execute_result",
     "metadata": {}
    }
   ],
   "source": [
    "len(source_segs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "eos_token = u'</S>'\n",
    "\n",
    "# Note empty prefix -- just a placeholder to make the IMT model work for general decoding\n",
    "target_prefix = u'<S>'.split()\n",
    "\n",
    "# TODO: the empty prefix is a hack until we remove the target_prefix completely from the graph\n",
    "target_prefix_ = imt_tm.imt_model.map_idx_or_unk(target_prefix,\n",
    "                                                 imt_tm.imt_model.trg_vocab,\n",
    "                                                 imt_tm.imt_model.unk_idx)\n",
    "\n",
    "target_prefix_ = np.tile(target_prefix_, (1, 1))\n",
    "\n",
    "# This function does the pre- and post-processing around constrained search\n",
    "def decode_with_constraints(source_sequence, constraint_seq=[]):\n",
    "    \"\"\"Decode `source_sequence` under `constraint_seq`.\n",
    "\n",
    "    Returns a (token_sequence, length_normalized_score) tuple for the best hypothesis.\n",
    "    The mutable default `constraint_seq=[]` is safe here because it is never mutated.\n",
    "    \"\"\"\n",
    "    # BUGFIX: was `source_seg` -- a global leaked from the driver loop -- so the\n",
    "    # function silently ignored its own parameter\n",
    "    source_, constraints_ = imt_tm.build_input_representations(source_sequence, constraint_seq)\n",
    "\n",
    "    start_hyp = imt_tm.start_hypothesis(source_seq=source_, target_prefix=target_prefix_, \n",
    "                                        constraints=constraints_)\n",
    "\n",
    "    # cap hypothesis length at 1.5x the source length\n",
    "    search_grid = decoder.search(start_hyp=start_hyp, constraints=constraints_, \n",
    "                                 max_hyp_len=int(round(len(source_sequence) * 1.5)), \n",
    "                                 beam_size=5)\n",
    "\n",
    "    # k[1] appears to count covered constraints -- top row holds hyps that met them all\n",
    "    top_row = max(k[1] for k in search_grid.keys())\n",
    "\n",
    "    if top_row > 1:\n",
    "        output_beams = [search_grid[k] for k in search_grid.keys() if k[1] == top_row]\n",
    "    else:\n",
    "        # constraints seq is empty\n",
    "        # Note this is a very hackish way to get the last beam (relies on py2 key order)\n",
    "        output_beams = [search_grid[search_grid.keys()[-1]]]\n",
    "\n",
    "    output_hyps = [h for beam in output_beams for h in beam]\n",
    "\n",
    "    # true length of each hypothesis = index of EOS (excludes padding after </S>)\n",
    "    true_lens = [h.sequence.index(eos_token) if eos_token in h.sequence else len(h.sequence)\n",
    "                 for h in output_hyps]\n",
    "    true_lens = [float(l) for l in true_lens]\n",
    "\n",
    "    # length-normalize scores and sort ascending; first element is taken as best\n",
    "    output_seqs = [(h.sequence, h.score / true_len) for h, true_len in zip(output_hyps, true_lens)]\n",
    "    output_seqs = sorted(output_seqs, key=lambda x: x[1])\n",
    "\n",
    "    best_hyp = output_seqs[0]\n",
    "    return best_hyp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_max_ref_constraint(hyp, ref, max_constraint_cutoff=5):\n",
    "    \"\"\"Extract maximal subsequences of `ref` whose tokens do not appear in `hyp`.\n",
    "\n",
    "    Returns (all_constraints, longest_constraint), where the longest constraint\n",
    "    is truncated to `max_constraint_cutoff` tokens. Both are empty lists when\n",
    "    every ref token already appears somewhere in the hypothesis.\n",
    "    \"\"\"\n",
    "    ref_constraints = []\n",
    "    hyp_toks = set(hyp)\n",
    "\n",
    "    # collect maximal runs of ref tokens absent from the hypothesis\n",
    "    current_sub_seq = []\n",
    "    for tok in ref:\n",
    "        if tok not in hyp_toks:\n",
    "            current_sub_seq.append(tok)\n",
    "        else:\n",
    "            if len(current_sub_seq) > 0:\n",
    "                ref_constraints.append(current_sub_seq)\n",
    "                current_sub_seq = []\n",
    "    if len(current_sub_seq) > 0:\n",
    "        ref_constraints.append(current_sub_seq)\n",
    "\n",
    "    # BUGFIX: guard against the case where no constraint was found; previously\n",
    "    # this raised IndexError on ref_constraints[0]\n",
    "    if len(ref_constraints) == 0:\n",
    "        return ([], [])\n",
    "\n",
    "    longest_constraint_idx = 0\n",
    "    len_longest = 0\n",
    "    for c_i, c in enumerate(ref_constraints):\n",
    "        if len(c) > len_longest:\n",
    "            len_longest = len(c)\n",
    "            longest_constraint_idx = c_i\n",
    "\n",
    "    longest_constraint = ref_constraints[longest_constraint_idx][:max_constraint_cutoff]\n",
    "    return (ref_constraints, longest_constraint)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "current idx: 0\n",
      "Cycle 1: [None, u'republi@@', u'kanische', u'Strategie', u',', u'um', u'der', u'Wiederwahl', u'von', u'Obama', u'entgegenzuwirken', u'.', u'</S>', u'</S>', u'</S>']"
     ]
    }
   ],
   "source": [
    "primt_cycle_1_output = 'newstest2013.cycle_1.hyps.de'\n",
    "primt_cycle_2_output = 'newstest2013.cycle_2.hyps.de'\n",
    "\n",
    "# truncate old output files (BUGFIX: close the handles instead of leaking them)\n",
    "open(primt_cycle_1_output, 'w').close()\n",
    "open(primt_cycle_2_output, 'w').close()\n",
    "\n",
    "for seg_idx, (source_seg, reference) in enumerate(zip(source_segs, target_segs)):\n",
    "    print('current idx: {}'.format(seg_idx))\n",
    "    # Cycle 1: unconstrained decoding\n",
    "    cycle_1_hyp, cycle_1_hyp_score = decode_with_constraints(source_seg)\n",
    "    print('Cycle 1: {}'.format(cycle_1_hyp))\n",
    "    # simulate the user: pick the longest reference span missing from the cycle-1 hyp\n",
    "    all_ref_constraints, max_ref_constraint = get_max_ref_constraint(cycle_1_hyp, reference)\n",
    "    print('Max ref constraint: {}'.format(max_ref_constraint))\n",
    "    print('all constraints: {}'.format(all_ref_constraints))\n",
    "\n",
    "    # Cycle 2: decode again under the new ref constraint\n",
    "    cycle_2_hyp, cycle_2_hyp_score = decode_with_constraints(source_seg, constraint_seq=[max_ref_constraint])\n",
    "    print('Cycle 2: {}'.format(cycle_2_hyp))\n",
    "\n",
    "    # cutoff the Nones which start each sequence\n",
    "    cycle_1_hyp = cycle_1_hyp[1:]\n",
    "    cycle_2_hyp = cycle_2_hyp[1:]\n",
    "    # cutoff everything from eos onwards\n",
    "    if eos_token in cycle_1_hyp:\n",
    "        cycle_1_hyp = cycle_1_hyp[:cycle_1_hyp.index(eos_token)]\n",
    "    if eos_token in cycle_2_hyp:\n",
    "        cycle_2_hyp = cycle_2_hyp[:cycle_2_hyp.index(eos_token)]\n",
    "\n",
    "    # append per segment so partial results survive if the run dies midway\n",
    "    with codecs.open(primt_cycle_1_output, 'a', encoding='utf8') as cycle_1_out:\n",
    "        cycle_1_out.write(u' '.join(cycle_1_hyp) + u'\\n')\n",
    "    with codecs.open(primt_cycle_2_output, 'a', encoding='utf8') as cycle_2_out:\n",
    "        cycle_2_out.write(u' '.join(cycle_2_hyp) + u'\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# now decode again under new ref constraints\n",
    "# (scratch duplicate of the selection logic inside get_max_ref_constraint)\n",
    "longest_constraint_idx = 0\n",
    "len_longest = 0\n",
    "for c_i, c in enumerate(ref_constraints):\n",
    "    if len(c) > len_longest:\n",
    "        # BUGFIX: len_longest was never updated, so the LAST non-empty constraint\n",
    "        # won instead of the longest one\n",
    "        len_longest = len(c)\n",
    "        longest_constraint_idx = c_i\n",
    "longest_constraint = ref_constraints[longest_constraint_idx]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "prev_best = list(best_hyp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# scratch: re-decode under the new ref constraints\n",
    "# (inline duplicate of the logic in decode_with_constraints; kept for interactive poking)\n",
    "# NOTE(review): `source_input` must already be defined by an earlier cell -- confirm\n",
    "# constraint_seq = ref_constraints\n",
    "constraint_seq = [longest_constraint]\n",
    "\n",
    "source_, constraints_ = imt_tm.build_input_representations(source_input, constraint_seq)\n",
    "\n",
    "# TODO: this is a hack until we remove the target_prefix completely from the graph\n",
    "target_prefix_ = imt_tm.imt_model.map_idx_or_unk(target_prefix,\n",
    "                                                 imt_tm.imt_model.trg_vocab,\n",
    "                                                 imt_tm.imt_model.unk_idx)\n",
    "\n",
    "# tile to (1, 1) -- single-hypothesis batch\n",
    "target_prefix_ = np.tile(target_prefix_, (1, 1))\n",
    "\n",
    "start_hyp = imt_tm.start_hypothesis(source_seq=source_, target_prefix=target_prefix_, \n",
    "                                       constraints=constraints_)\n",
    "\n",
    "# cap hypothesis length at 1.5x the source length\n",
    "search_grid = decoder.search(start_hyp=start_hyp, constraints=constraints_,\n",
    "                             max_hyp_len=int(round(len(source_input) * 1.5)),\n",
    "                             beam_size=5)\n",
    "\n",
    "# k[1] appears to count covered constraints -- top row holds hyps that met them all\n",
    "top_row = max(k[1] for k in search_grid.keys())\n",
    "\n",
    "if top_row > 1:\n",
    "    output_beams = [search_grid[k] for k in search_grid.keys() if k[1] == top_row]\n",
    "else:\n",
    "    # there are no constraints\n",
    "    # Note this is a very hackish way to get the last beam\n",
    "    output_beams = [search_grid[search_grid.keys()[-1]]]\n",
    "\n",
    "\n",
    "output_hyps = [h for beam in output_beams for h in beam]\n",
    "\n",
    "# getting the true length of each hypothesis (index of EOS excludes padding)\n",
    "eos_token = u'</S>'\n",
    "true_lens = [h.sequence.index(eos_token) if eos_token in h.sequence else len(h.sequence)\n",
    "             for h in output_hyps]\n",
    "true_lens = [float(l) for l in true_lens]\n",
    "\n",
    "# length-normalize and sort ascending; first element is taken as best\n",
    "output_seqs = [(h.sequence, h.score / true_len) for h, true_len in zip(output_hyps, true_lens)]\n",
    "output_seqs = sorted(output_seqs, key=lambda x: x[1])\n",
    "\n",
    "best_hyp = output_seqs[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "best_hyp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ref"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "prev_best"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ref"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ref_constraints"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "output_seqs[:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "search_grid.keys()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    ""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    ""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    ""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    ""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    ""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# TODO: use cells below here WHEN WRITING TESTS"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "top_row = max(k[1] for k in search_grid.keys())\n",
    "top_row"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# scratch for future tests: rank hypotheses from the top row of the grid\n",
    "# (duplicates the post-processing in decode_with_constraints)\n",
    "output_beams = [search_grid[k] for k in search_grid.keys() if k[1] == top_row]\n",
    "output_hyps = [h for beam in output_beams for h in beam]\n",
    "\n",
    "# getting the true length of each hypothesis (index of EOS excludes padding)\n",
    "eos_token = u'</S>'\n",
    "true_lens = [h.sequence.index(eos_token) if eos_token in h.sequence else len(h.sequence)\n",
    "             for h in output_hyps]\n",
    "true_lens = [float(l) for l in true_lens]\n",
    "\n",
    "# length-normalize and sort ascending\n",
    "output_seqs = [(h.sequence, h.score / true_len) for h, true_len in zip(output_hyps, true_lens)]\n",
    "output_seqs = sorted(output_seqs, key=lambda x: x[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "output_seqs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "[k for k in search_grid.keys()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "c = imt_tm.generate_constrained(start_hyp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cont = imt_tm.continue_constrained(c[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cont.sequence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "c[0].sequence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "c = imt_tm.continue_constrained(start_hyp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "t = imt_tm.generate(start_hyp, n_best=10)\n",
    "nts = [imt_tm.generate(h, n_best=10) for h in t]\n",
    "nnts = [[imt_tm.generate(h, n_best=10) for h in t] for t in nts]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "[[[h.sequence for h in stack3] for stack3 in stack2] for stack2 in nnts]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "start_hyp.payload"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    ""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    ""
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2.0
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}