{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import dynet as dy\n",
    "import random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Vocabulary: lowercase letters, space, and a sentinel EOS token ---\n",
    "EOS = \"<EOS>\"\n",
    "characters = list(\"abcdefghijklmnopqrstuvwxyz \")\n",
    "characters.append(EOS)\n",
    "\n",
    "# index <-> character maps\n",
    "int2char = list(characters)\n",
    "char2int = {c:i for i,c in enumerate(characters)}\n",
    "\n",
    "VOCAB_SIZE = len(characters)\n",
    "\n",
    "# --- Hyperparameters ---\n",
    "LSTM_NUM_OF_LAYERS = 2\n",
    "EMBEDDINGS_SIZE = 32\n",
    "STATE_SIZE = 32\n",
    "ATTENTION_SIZE = 32\n",
    "\n",
    "model = dy.Model()\n",
    "\n",
    "# Bidirectional encoder: one LSTM per direction\n",
    "enc_fwd_lstm = dy.LSTMBuilder(LSTM_NUM_OF_LAYERS, EMBEDDINGS_SIZE, STATE_SIZE, model)\n",
    "enc_bwd_lstm = dy.LSTMBuilder(LSTM_NUM_OF_LAYERS, EMBEDDINGS_SIZE, STATE_SIZE, model)\n",
    "\n",
    "# Decoder input = attention context (STATE_SIZE*2) + previous output embedding\n",
    "dec_lstm = dy.LSTMBuilder(LSTM_NUM_OF_LAYERS, STATE_SIZE*2+EMBEDDINGS_SIZE, STATE_SIZE, model)\n",
    "\n",
    "# Attention parameters (MLP scorer): attention_w1 projects encoder columns,\n",
    "# attention_w2 projects the flattened decoder state -- state.s() yields a\n",
    "# (c, h) pair per layer, hence the STATE_SIZE*LSTM_NUM_OF_LAYERS*2 width --\n",
    "# and attention_v reduces each position to a scalar score.\n",
    "input_lookup = model.add_lookup_parameters((VOCAB_SIZE, EMBEDDINGS_SIZE))\n",
    "attention_w1 = model.add_parameters( (ATTENTION_SIZE, STATE_SIZE*2))\n",
    "attention_w2 = model.add_parameters( (ATTENTION_SIZE, STATE_SIZE*LSTM_NUM_OF_LAYERS*2))\n",
    "attention_v = model.add_parameters( (1, ATTENTION_SIZE))\n",
    "decoder_w = model.add_parameters( (VOCAB_SIZE, STATE_SIZE))\n",
    "decoder_b = model.add_parameters( (VOCAB_SIZE))\n",
    "output_lookup = model.add_lookup_parameters((VOCAB_SIZE, EMBEDDINGS_SIZE))\n",
    "\n",
    "\n",
    "def embed_sentence(sentence):\n",
    "    \"\"\"Map a string to a list of embedding expressions, framed by EOS on both sides.\"\"\"\n",
    "    # The original declared `global input_lookup`, but a global that is only\n",
    "    # read (never rebound) needs no global statement -- dropped.\n",
    "    indices = [char2int[c] for c in [EOS] + list(sentence) + [EOS]]\n",
    "    return [input_lookup[i] for i in indices]\n",
    "\n",
    "\n",
    "def run_lstm(init_state, input_vecs):\n",
    "    s = init_state\n",
    "\n",
    "    out_vectors = []\n",
    "    for vector in input_vecs:\n",
    "        s = s.add_input(vector)\n",
    "        out_vector = s.output()\n",
    "        out_vectors.append(out_vector)\n",
    "    return out_vectors\n",
    "\n",
    "\n",
    "def encode_sentence(enc_fwd_lstm, enc_bwd_lstm, sentence):\n",
    "    \"\"\"Run a BiLSTM over the embedded chars; return one [fwd; bwd] vector per position.\"\"\"\n",
    "    fwd = run_lstm(enc_fwd_lstm.initial_state(), sentence)\n",
    "    bwd = run_lstm(enc_bwd_lstm.initial_state(), sentence[::-1])\n",
    "    # re-reverse the backward outputs so both sequences align position-wise\n",
    "    return [dy.concatenate([f, b]) for f, b in zip(fwd, bwd[::-1])]\n",
    "\n",
    "\n",
    "def attend(input_mat, state, w1dt):\n",
    "    \"\"\"Return the attention context vector for the current decoder state.\n",
    "\n",
    "    input_mat: (STATE_SIZE*2 x seqlen) encoder outputs concatenated as columns\n",
    "    state:     decoder RNN state; state.s() holds the per-layer state vectors\n",
    "    w1dt:      (ATTENTION_SIZE x seqlen) precomputed attention_w1 * input_mat\n",
    "    \"\"\"\n",
    "    # dy.parameter(...) is deprecated (this notebook's own output prints the\n",
    "    # warning): Parameter objects are used directly in expressions and are\n",
    "    # added to the computation graph automatically.\n",
    "    # w2dt: project the flattened decoder state into attention space\n",
    "    w2dt = attention_w2 * dy.concatenate(list(state.s()))\n",
    "    # one unnormalized score per source position\n",
    "    unnormalized = dy.transpose(attention_v * dy.tanh(dy.colwise_add(w1dt, w2dt)))\n",
    "    att_weights = dy.softmax(unnormalized)\n",
    "    # context = attention-weighted sum of the encoder columns\n",
    "    return input_mat * att_weights\n",
    "\n",
    "\n",
    "def decode(dec_lstm, vectors, output):\n",
    "    \"\"\"Teacher-forced decoding of `output`; return the summed cross-entropy loss.\"\"\"\n",
    "    output = [EOS] + list(output) + [EOS]\n",
    "    output = [char2int[c] for c in output]\n",
    "\n",
    "    # dy.parameter(...) is deprecated; the Parameter objects are used directly.\n",
    "    input_mat = dy.concatenate_cols(vectors)\n",
    "    w1dt = None\n",
    "\n",
    "    last_output_embeddings = output_lookup[char2int[EOS]]\n",
    "    # the initial attention context is a zero vector\n",
    "    s = dec_lstm.initial_state().add_input(dy.concatenate([dy.vecInput(STATE_SIZE*2), last_output_embeddings]))\n",
    "    loss = []\n",
    "\n",
    "    for char in output:\n",
    "        # w1dt is computed and cached once for the entire decoding phase;\n",
    "        # an explicit `is None` test replaces the original `w1dt or ...`,\n",
    "        # which relied on the truthiness of a DyNet Expression.\n",
    "        if w1dt is None:\n",
    "            w1dt = attention_w1 * input_mat\n",
    "        vector = dy.concatenate([attend(input_mat, s, w1dt), last_output_embeddings])\n",
    "        s = s.add_input(vector)\n",
    "        out_vector = decoder_w * s.output() + decoder_b\n",
    "        probs = dy.softmax(out_vector)\n",
    "        last_output_embeddings = output_lookup[char]\n",
    "        loss.append(-dy.log(dy.pick(probs, char)))\n",
    "    return dy.esum(loss)\n",
    "\n",
    "\n",
    "def generate(in_seq, enc_fwd_lstm, enc_bwd_lstm, dec_lstm):\n",
    "    \"\"\"Greedily decode a string for `in_seq`; stop after the second generated EOS\n",
    "    or after 2*len(in_seq) steps, whichever comes first.\n",
    "    \"\"\"\n",
    "    embedded = embed_sentence(in_seq)\n",
    "    encoded = encode_sentence(enc_fwd_lstm, enc_bwd_lstm, embedded)\n",
    "\n",
    "    # dy.parameter(...) is deprecated; the Parameter objects are used directly.\n",
    "    input_mat = dy.concatenate_cols(encoded)\n",
    "    w1dt = None\n",
    "\n",
    "    last_output_embeddings = output_lookup[char2int[EOS]]\n",
    "    # the initial attention context is a zero vector\n",
    "    s = dec_lstm.initial_state().add_input(dy.concatenate([dy.vecInput(STATE_SIZE * 2), last_output_embeddings]))\n",
    "\n",
    "    out = ''\n",
    "    count_EOS = 0\n",
    "    # hard length cap guarantees termination even if EOS is never produced\n",
    "    for i in range(len(in_seq)*2):\n",
    "        if count_EOS == 2: break\n",
    "        # w1dt is computed and cached once for the entire decoding phase;\n",
    "        # `is None` replaces the truthiness-dependent `w1dt or ...`.\n",
    "        if w1dt is None:\n",
    "            w1dt = attention_w1 * input_mat\n",
    "        vector = dy.concatenate([attend(input_mat, s, w1dt), last_output_embeddings])\n",
    "        s = s.add_input(vector)\n",
    "        out_vector = decoder_w * s.output() + decoder_b\n",
    "        probs = dy.softmax(out_vector).vec_value()\n",
    "        # greedy argmax over the vocabulary\n",
    "        next_char = probs.index(max(probs))\n",
    "        last_output_embeddings = output_lookup[next_char]\n",
    "        if int2char[next_char] == EOS:\n",
    "            count_EOS += 1\n",
    "            continue\n",
    "\n",
    "        out += int2char[next_char]\n",
    "    return out\n",
    "\n",
    "\n",
    "def get_loss(input_sentence, output_sentence, enc_fwd_lstm, enc_bwd_lstm, dec_lstm):\n",
    "    \"\"\"Build a fresh computation graph and return the seq2seq loss Expression.\"\"\"\n",
    "    # renew_cg() discards the previous graph; all expressions are rebuilt below\n",
    "    dy.renew_cg()\n",
    "    embedded = embed_sentence(input_sentence)\n",
    "    encoded = encode_sentence(enc_fwd_lstm, enc_bwd_lstm, embedded)\n",
    "    return decode(dec_lstm, encoded, output_sentence)\n",
    "\n",
    "\n",
    "def train(model, sentence, iterations=600, report_every=20):\n",
    "    \"\"\"Overfit the model to reproduce `sentence`, periodically printing the loss\n",
    "    and a greedy sample.\n",
    "\n",
    "    iterations / report_every generalize the original hard-coded 600 / 20;\n",
    "    the defaults keep the original behavior.\n",
    "    \"\"\"\n",
    "    trainer = dy.SimpleSGDTrainer(model)\n",
    "    for i in range(iterations):\n",
    "        loss = get_loss(sentence, sentence, enc_fwd_lstm, enc_bwd_lstm, dec_lstm)\n",
    "        loss_value = loss.value()\n",
    "        loss.backward()\n",
    "        trainer.update()\n",
    "        if i % report_every == 0:\n",
    "            print(loss_value)\n",
    "            print(generate(sentence, enc_fwd_lstm, enc_bwd_lstm, dec_lstm))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The dy.parameter(...) call is now DEPRECATED.\n",
      "        There is no longer need to explicitly add parameters to the computation graph.\n",
      "        Any used parameter will be added automatically.\n",
      "50.48177719116211\n",
      "zzzzzttzzzzttzzztztzztztzz\n",
      "31.476842880249023\n",
      "iiiiiiiiiigggggggggggggggg\n",
      "26.74808120727539\n",
      "iiii  okkggg\n",
      "22.831687927246094\n",
      "iii  wokkng\n",
      "20.25616455078125\n",
      "iii  workng\n",
      "18.189437866210938\n",
      "iti  workigg\n",
      "16.329940795898438\n",
      "it i workigg\n",
      "14.614733695983887\n",
      "it i workin\n",
      "12.87952709197998\n",
      "it i  orking\n",
      "11.016839027404785\n",
      "it is wokkng\n",
      "8.881823539733887\n",
      "it is workin\n",
      "6.623880863189697\n",
      "it is workin\n",
      "5.263763427734375\n",
      "it is workingg\n",
      "3.5002636909484863\n",
      "it is working\n",
      "2.432921886444092\n",
      "it is working\n",
      "1.0741277933120728\n",
      "it is working\n",
      "0.7216563820838928\n",
      "it is working\n",
      "0.531977117061615\n",
      "it is working\n",
      "0.41499829292297363\n",
      "it is working\n",
      "0.3368317484855652\n",
      "it is working\n",
      "0.2814902663230896\n",
      "it is working\n",
      "0.24055416882038116\n",
      "it is working\n",
      "0.20921576023101807\n",
      "it is working\n",
      "0.18455706536769867\n",
      "it is working\n",
      "0.16471265256404877\n",
      "it is working\n",
      "0.14844007790088654\n",
      "it is working\n",
      "0.13488323986530304\n",
      "it is working\n",
      "0.12343519926071167\n",
      "it is working\n",
      "0.1136535108089447\n",
      "it is working\n",
      "0.10520995408296585\n",
      "it is working\n"
     ]
    }
   ],
   "source": [
    "train(model, \"it is working\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:miniconda3]",
   "language": "python",
   "name": "conda-env-miniconda3-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
