{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 142,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "\n",
    "# Toy source-side (\"Chinese\") embedding table: vocab of 4 tokens, d_model=4.\n",
    "chinese_embedding = tf.constant([[0.11,0.21,0.31,0.41],\n",
    "                         [0.21,0.31,0.41,0.51],\n",
    "                         [0.31,0.41,0.51,0.61],\n",
    "                         [0.41,0.51,0.61,0.71]],dtype=tf.float32)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# Hand-made positional encoding, one row per position (up to length 4).\n",
    "# NOTE(review): simplified constants; the Transformer paper uses sin/cos.\n",
    "position_encoding = tf.constant([[0.01,0.01,0.01,0.01],\n",
    "                         [0.02,0.02,0.02,0.02],\n",
    "                         [0.03,0.03,0.03,0.03],\n",
    "                         [0.04,0.04,0.04,0.04]],dtype=tf.float32)\n",
    "\n",
    "\n",
    "# Two example source sentences, each a sequence of 4 token ids.\n",
    "encoder_input = tf.constant([[0,1,2,3],[2,3,0,1]],dtype=tf.int32)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 143,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encoder self-attention projection weights.\n",
    "# w_Q / w_K / w_V map d_model=4 -> 6 (split into 2 heads of size 3 later).\n",
    "w_Q = tf.constant([[0.1,0.2,0.3,0.4,0.5,0.6],\n",
    "                   [0.2,0.3,0.4,0.5,0.6,0.7],\n",
    "                   [0.3,0.4,0.5,0.6,0.7,0.8],\n",
    "                   [0.4,0.5,0.6,0.7,0.8,0.9]],dtype=tf.float32)\n",
    "\n",
    "w_K = tf.constant([[0.08,0.18,0.28,0.38,0.48,0.58],\n",
    "                   [0.18,0.28,0.38,0.48,0.58,0.68],\n",
    "                   [0.28,0.38,0.48,0.58,0.68,0.78],\n",
    "                   [0.38,0.48,0.58,0.68,0.78,0.88]],dtype=tf.float32)\n",
    "\n",
    "w_V = tf.constant([[0.12,0.22,0.32,0.42,0.52,0.62],\n",
    "                   [0.22,0.32,0.42,0.52,0.62,0.72],\n",
    "                   [0.32,0.42,0.52,0.62,0.72,0.82],\n",
    "                   [0.42,0.52,0.62,0.72,0.82,0.92]],dtype=tf.float32)\n",
    "\n",
    "# Output projection w_Z maps the 6-dim concatenated head output back to 4.\n",
    "w_Z = tf.constant([[0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4]],dtype=tf.float32)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 144,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Single-head scaled dot-product attention over the embedded encoder input.\n",
    "with tf.variable_scope(\"encoder_input\"):\n",
    "    # Token embedding lookup plus positional encoding, shape (2, 4, 4).\n",
    "    encoder_embedding_input = tf.nn.embedding_lookup(chinese_embedding,encoder_input)\n",
    "    encoder_embedding_input = encoder_embedding_input + position_encoding\n",
    "\n",
    "with tf.variable_scope(\"encoder_scaled_dot_product_attention\"):\n",
    "    # Flatten (batch, seq, 4) -> (batch*seq, 4) so a plain 2-D matmul applies\n",
    "    # the Q/K/V projections; shapes are restored to (batch, seq, 6) below.\n",
    "    encoder_Q = tf.matmul(tf.reshape(encoder_embedding_input,(-1,tf.shape(encoder_embedding_input)[2])),w_Q)\n",
    "    encoder_K = tf.matmul(tf.reshape(encoder_embedding_input,(-1,tf.shape(encoder_embedding_input)[2])),w_K)\n",
    "    encoder_V = tf.matmul(tf.reshape(encoder_embedding_input,(-1,tf.shape(encoder_embedding_input)[2])),w_V)\n",
    "    \n",
    "    encoder_Q = tf.reshape(encoder_Q,(tf.shape(encoder_embedding_input)[0],tf.shape(encoder_embedding_input)[1],-1))\n",
    "    encoder_K = tf.reshape(encoder_K,(tf.shape(encoder_embedding_input)[0],tf.shape(encoder_embedding_input)[1],-1))\n",
    "    encoder_V = tf.reshape(encoder_V,(tf.shape(encoder_embedding_input)[0],tf.shape(encoder_embedding_input)[1],-1))\n",
    "                          \n",
    "                          \n",
    "    # Attention scores Q.K^T, shape (batch, seq, seq).\n",
    "    attention_map = tf.matmul(encoder_Q,tf.transpose(encoder_K,[0,2,1]))\n",
    "    # NOTE(review): 8 = sqrt(64) is the paper's scale for d_k=64; the key\n",
    "    # dimension here is 6, so sqrt(6.) would be the faithful scale factor.\n",
    "    attention_map = attention_map / 8\n",
    "    attention_map = tf.nn.softmax(attention_map)\n",
    "    \n",
    "    # Attention-weighted sum of the value vectors.\n",
    "    encoder_first_block_output = tf.matmul(attention_map,encoder_V)\n",
    "    \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 145,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[[ 0.21250257  0.23575781  0.261558    0.29018164]\n",
      "  [ 0.19920367  0.22981204  0.26512352  0.3058607 ]\n",
      "  [ 0.18638705  0.22359632  0.2682339   0.32178268]\n",
      "  [ 0.1740716   0.21714528  0.27087748  0.33790565]]\n",
      "\n",
      " [[ 0.26062688  0.31044313  0.19575652  0.23317349]\n",
      "  [ 0.2620635   0.32459584  0.18464124  0.22869946]\n",
      "  [ 0.25752383  0.28774342  0.2147654   0.23996742]\n",
      "  [ 0.25953028  0.30154324  0.20302993  0.23589657]]]\n",
      "[[[ 0.53517497  0.71486932  0.89456373  1.07425821  1.25395262  1.43364716]\n",
      "  [ 0.54090375  0.72271991  0.90453613  1.08635235  1.26816857  1.44998479]\n",
      "  [ 0.54657894  0.730497    0.91441524  1.09833336  1.28225148  1.4661696 ]\n",
      "  [ 0.55218691  0.73818207  0.92417717  1.11017239  1.29616749  1.48216271]]\n",
      "\n",
      " [[ 0.53879762  0.71983379  0.90086997  1.0819062   1.26294243  1.44397855]\n",
      "  [ 0.54297805  0.72556257  0.90814698  1.0907315   1.27331591  1.45590043]\n",
      "  [ 0.531892    0.71037048  0.88884902  1.06732762  1.2458061   1.4242847 ]\n",
      "  [ 0.53612053  0.71616518  0.89620984  1.07625449  1.25629914  1.43634379]]]\n"
     ]
    }
   ],
   "source": [
    "# Scaled Dot-Product Attention\n",
    "# Run the graph and inspect the attention weights and attended values.\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    print(sess.run(attention_map))\n",
    "    print(sess.run(encoder_first_block_output))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 146,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Full encoder block: 2-head self-attention, output projection, residual\n",
    "# connection, then a position-wise feed-forward layer with its own residual.\n",
    "with tf.variable_scope(\"encoder_input\"):\n",
    "    encoder_embedding_input = tf.nn.embedding_lookup(chinese_embedding,encoder_input)\n",
    "    encoder_embedding_input = encoder_embedding_input + position_encoding\n",
    "\n",
    "with tf.variable_scope(\"encoder_block\"):\n",
    "    # Q/K/V projections (flatten to 2-D for the matmul, then reshape back).\n",
    "    encoder_Q = tf.matmul(tf.reshape(encoder_embedding_input,(-1,tf.shape(encoder_embedding_input)[2])),w_Q)\n",
    "    encoder_K = tf.matmul(tf.reshape(encoder_embedding_input,(-1,tf.shape(encoder_embedding_input)[2])),w_K)\n",
    "    encoder_V = tf.matmul(tf.reshape(encoder_embedding_input,(-1,tf.shape(encoder_embedding_input)[2])),w_V)\n",
    "    \n",
    "    encoder_Q = tf.reshape(encoder_Q,(tf.shape(encoder_embedding_input)[0],tf.shape(encoder_embedding_input)[1],-1))\n",
    "    encoder_K = tf.reshape(encoder_K,(tf.shape(encoder_embedding_input)[0],tf.shape(encoder_embedding_input)[1],-1))\n",
    "    encoder_V = tf.reshape(encoder_V,(tf.shape(encoder_embedding_input)[0],tf.shape(encoder_embedding_input)[1],-1))\n",
    "          \n",
    "    # Multi-head trick: split the feature axis into 2 heads and stack the\n",
    "    # heads along the batch axis, so one batched matmul attends per head.\n",
    "    encoder_Q_split = tf.split(encoder_Q,2,axis=2)\n",
    "    encoder_K_split = tf.split(encoder_K,2,axis=2)\n",
    "    encoder_V_split = tf.split(encoder_V,2,axis=2)\n",
    "    \n",
    "    encoder_Q_concat = tf.concat(encoder_Q_split,axis=0)\n",
    "    encoder_K_concat = tf.concat(encoder_K_split,axis=0)\n",
    "    encoder_V_concat = tf.concat(encoder_V_split,axis=0)\n",
    "    \n",
    "    attention_map = tf.matmul(encoder_Q_concat,tf.transpose(encoder_K_concat,[0,2,1]))\n",
    "    # NOTE(review): per-head key size is 3 here, so the faithful scale is\n",
    "    # sqrt(3.); 8 is carried over from the paper's d_k=64.\n",
    "    attention_map = attention_map / 8\n",
    "    attention_map = tf.nn.softmax(attention_map)\n",
    "    \n",
    "    weightedSumV = tf.matmul(attention_map,encoder_V_concat)\n",
    "    \n",
    "    # Undo the head stacking: concatenate heads back on the feature axis.\n",
    "    outputs_z = tf.concat(tf.split(weightedSumV,2,axis=0),axis=2)\n",
    "    \n",
    "    # Project the concatenated heads back to d_model with w_Z.\n",
    "    sa_outputs = tf.matmul(tf.reshape(outputs_z,(-1,tf.shape(outputs_z)[2])),w_Z)\n",
    "    sa_outputs = tf.reshape(sa_outputs,(tf.shape(encoder_embedding_input)[0],tf.shape(encoder_embedding_input)[1],-1))\n",
    "    \n",
    "    \n",
    "    # Residual connection around the self-attention sub-layer.\n",
    "    sa_outputs = sa_outputs + encoder_embedding_input\n",
    "    \n",
    "    # todo :add BN\n",
    "    # NOTE(review): the Transformer uses layer normalization here, not BN.\n",
    "    W_f = tf.constant([[0.2,0.3,0.5,0.4],\n",
    "                       [0.2,0.3,0.5,0.4],\n",
    "                       [0.2,0.3,0.5,0.4],\n",
    "                       [0.2,0.3,0.5,0.4]])\n",
    "    \n",
    "    # Position-wise feed-forward (a single linear layer in this toy version).\n",
    "    ffn_outputs = tf.matmul(tf.reshape(sa_outputs,(-1,tf.shape(sa_outputs)[2])),W_f)\n",
    "    ffn_outputs = tf.reshape(ffn_outputs,(tf.shape(sa_outputs)[0],tf.shape(sa_outputs)[1],-1))\n",
    "    \n",
    "    # Residual connection around the feed-forward sub-layer.\n",
    "    encoder_outputs = ffn_outputs + sa_outputs\n",
    "    # todo :add BN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 147,
   "metadata": {},
   "outputs": [],
   "source": [
    "# import numpy as np\n",
    "# with tf.Session() as sess:\n",
    "# #     print(sess.run(encoder_Q))\n",
    "# #     print(sess.run(encoder_Q_split))\n",
    "#     #print(sess.run(weightedSumV))\n",
    "#     #print(sess.run(outputs_z))\n",
    "#     #print(sess.run(sa_outputs))\n",
    "#     #print(sess.run(ffn_outputs))\n",
    "#     print(sess.run(encoder_outputs))\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 148,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Target-side (\"English\") embedding table: 4 tokens, d_model=4.\n",
    "english_embedding = tf.constant([[0.51,0.61,0.71,0.81],\n",
    "                         [0.61,0.71,0.81,0.91],\n",
    "                         [0.71,0.81,0.91,1.01],\n",
    "                         [0.81,0.91,1.01,1.11]],dtype=tf.float32)\n",
    "\n",
    "\n",
    "# Same hand-made positional encoding as on the encoder side.\n",
    "position_encoding = tf.constant([[0.01,0.01,0.01,0.01],\n",
    "                         [0.02,0.02,0.02,0.02],\n",
    "                         [0.03,0.03,0.03,0.03],\n",
    "                         [0.04,0.04,0.04,0.04]],dtype=tf.float32)\n",
    "\n",
    "# Two target sequences of length 2 (token ids into english_embedding).\n",
    "decoder_input = tf.constant([[1,2],[2,1]],dtype=tf.int32)\n",
    "\n",
    "\n",
    "# Q/K/V projections (4 -> 6) and output projection (6 -> 4) for the\n",
    "# decoder's masked self-attention sub-layer.\n",
    "w_Q_decoder_sa = tf.constant([[0.15,0.25,0.35,0.45,0.55,0.65],\n",
    "                   [0.25,0.35,0.45,0.55,0.65,0.75],\n",
    "                   [0.35,0.45,0.55,0.65,0.75,0.85],\n",
    "                   [0.45,0.55,0.65,0.75,0.85,0.95]],dtype=tf.float32)\n",
    "\n",
    "w_K_decoder_sa = tf.constant([[0.13,0.23,0.33,0.43,0.53,0.63],\n",
    "                   [0.23,0.33,0.43,0.53,0.63,0.73],\n",
    "                   [0.33,0.43,0.53,0.63,0.73,0.83],\n",
    "                   [0.43,0.53,0.63,0.73,0.83,0.93]],dtype=tf.float32)\n",
    "\n",
    "w_V_decoder_sa = tf.constant([[0.17,0.27,0.37,0.47,0.57,0.67],\n",
    "                   [0.27,0.37,0.47,0.57,0.67,0.77],\n",
    "                   [0.37,0.47,0.57,0.67,0.77,0.87],\n",
    "                   [0.47,0.57,0.67,0.77,0.87,0.97]],dtype=tf.float32)\n",
    "\n",
    "w_Z_decoder_sa = tf.constant([[0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4]],dtype=tf.float32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 149,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Decoder masked (causal) 2-head self-attention over the target embeddings.\n",
    "with tf.variable_scope(\"decoder_input\"):\n",
    "    # Embed target tokens; slice the positional rows to this sequence length.\n",
    "    decoder_embedding_input = tf.nn.embedding_lookup(english_embedding,decoder_input)\n",
    "    decoder_embedding_input = decoder_embedding_input + position_encoding[0:tf.shape(decoder_embedding_input)[1]]\n",
    "\n",
    "with tf.variable_scope(\"decoder_sa_block\"):\n",
    "    # Q/K/V projections, then head split/stack — same scheme as the encoder.\n",
    "    decoder_Q = tf.matmul(tf.reshape(decoder_embedding_input,(-1,tf.shape(decoder_embedding_input)[2])),w_Q_decoder_sa)\n",
    "    decoder_K = tf.matmul(tf.reshape(decoder_embedding_input,(-1,tf.shape(decoder_embedding_input)[2])),w_K_decoder_sa)\n",
    "    decoder_V = tf.matmul(tf.reshape(decoder_embedding_input,(-1,tf.shape(decoder_embedding_input)[2])),w_V_decoder_sa)\n",
    "    \n",
    "    decoder_Q = tf.reshape(decoder_Q,(tf.shape(decoder_embedding_input)[0],tf.shape(decoder_embedding_input)[1],-1))\n",
    "    decoder_K = tf.reshape(decoder_K,(tf.shape(decoder_embedding_input)[0],tf.shape(decoder_embedding_input)[1],-1))\n",
    "    decoder_V = tf.reshape(decoder_V,(tf.shape(decoder_embedding_input)[0],tf.shape(decoder_embedding_input)[1],-1))\n",
    "          \n",
    "    decoder_Q_split = tf.split(decoder_Q,2,axis=2)\n",
    "    decoder_K_split = tf.split(decoder_K,2,axis=2)\n",
    "    decoder_V_split = tf.split(decoder_V,2,axis=2)\n",
    "    \n",
    "    decoder_Q_concat = tf.concat(decoder_Q_split,axis=0)\n",
    "    decoder_K_concat = tf.concat(decoder_K_split,axis=0)\n",
    "    decoder_V_concat = tf.concat(decoder_V_split,axis=0)\n",
    "    \n",
    "    decoder_sa_attention_map_raw = tf.matmul(decoder_Q_concat,tf.transpose(decoder_K_concat,[0,2,1]))\n",
    "    # Scale the scores (see the encoder-side note about sqrt(d_k) vs. 8).\n",
    "    decoder_sa_attention_map = decoder_sa_attention_map_raw / 8\n",
    "    # Build a lower-triangular mask so position i only attends to j <= i.\n",
    "    diag_vals = tf.ones_like(decoder_sa_attention_map[0,:,:])\n",
    "    # NOTE(review): tf.contrib.linalg.LinearOperatorTriL was removed in newer\n",
    "    # TF; tf.linalg.LinearOperatorLowerTriangular (or tf.linalg.band_part) is\n",
    "    # the modern replacement.\n",
    "    tril = tf.contrib.linalg.LinearOperatorTriL(diag_vals).to_dense()\n",
    "    # Tile the mask across the stacked batch*heads dimension.\n",
    "    masks = tf.tile(tf.expand_dims(tril,0),[tf.shape(decoder_sa_attention_map)[0],1,1])\n",
    "    # Masked (future) positions get a huge negative score so that the softmax\n",
    "    # assigns them effectively zero weight.\n",
    "    paddings = tf.ones_like(masks) * (-2 ** 32 + 1)\n",
    "    decoder_sa_attention_map = tf.where(tf.equal(masks,0),paddings,decoder_sa_attention_map)\n",
    "    decoder_sa_attention_map = tf.nn.softmax(decoder_sa_attention_map)\n",
    "    \n",
    "    weightedSumV = tf.matmul(decoder_sa_attention_map,decoder_V_concat)\n",
    "    \n",
    "    # Recombine heads on the feature axis, then project back to d_model.\n",
    "    decoder_outputs_z = tf.concat(tf.split(weightedSumV,2,axis=0),axis=2)\n",
    "    \n",
    "    decoder_sa_outputs = tf.matmul(tf.reshape(decoder_outputs_z,(-1,tf.shape(decoder_outputs_z)[2])),w_Z_decoder_sa)\n",
    "    \n",
    "    decoder_sa_outputs = tf.reshape(decoder_sa_outputs,(tf.shape(decoder_embedding_input)[0],tf.shape(decoder_embedding_input)[1],-1))\n",
    "    \n",
    "    \n",
    "    \n",
    "    \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 150,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Projection weights for the decoder's encoder-decoder attention sub-layer:\n",
    "# Q is computed from the decoder state, K/V from the encoder outputs (4 -> 6),\n",
    "# and w_Z_decoder_sa2 maps the concatenated heads back to d_model=4.\n",
    "w_Q_decoder_sa2 = tf.constant([[0.2,0.3,0.4,0.5,0.6,0.7],\n",
    "                   [0.3,0.4,0.5,0.6,0.7,0.8],\n",
    "                   [0.4,0.5,0.6,0.7,0.8,0.9],\n",
    "                   [0.5,0.6,0.7,0.8,0.9,1]],dtype=tf.float32)\n",
    "\n",
    "w_K_decoder_sa2 = tf.constant([[0.18,0.28,0.38,0.48,0.58,0.68],\n",
    "                   [0.28,0.38,0.48,0.58,0.68,0.78],\n",
    "                   [0.38,0.48,0.58,0.68,0.78,0.88],\n",
    "                   [0.48,0.58,0.68,0.78,0.88,0.98]],dtype=tf.float32)\n",
    "\n",
    "w_V_decoder_sa2 = tf.constant([[0.22,0.32,0.42,0.52,0.62,0.72],\n",
    "                   [0.32,0.42,0.52,0.62,0.72,0.82],\n",
    "                   [0.42,0.52,0.62,0.72,0.82,0.92],\n",
    "                   [0.52,0.62,0.72,0.82,0.92,1.02]],dtype=tf.float32)\n",
    "\n",
    "w_Z_decoder_sa2 = tf.constant([[0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4],\n",
    "                   [0.1,0.2,0.3,0.4]],dtype=tf.float32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 160,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Decoder encoder-decoder attention: queries from the decoder self-attention\n",
    "# output, keys/values from the encoder outputs, then FFN — each with residual.\n",
    "with tf.variable_scope(\"decoder_encoder_attention_block\"):\n",
    "    \n",
    "    # Residual connection around the masked self-attention sub-layer.\n",
    "    # NOTE(review): self-referential update — re-running this cell adds the\n",
    "    # embedding residual again (not idempotent across re-executions).\n",
    "    decoder_sa_outputs = decoder_sa_outputs + decoder_embedding_input\n",
    "    \n",
    "    # Q from the decoder state; K and V from the encoder outputs.\n",
    "    encoder_decoder_Q = tf.matmul(tf.reshape(decoder_sa_outputs,(-1,tf.shape(decoder_sa_outputs)[2])),w_Q_decoder_sa2)\n",
    "    encoder_decoder_K = tf.matmul(tf.reshape(encoder_outputs,(-1,tf.shape(encoder_outputs)[2])),w_K_decoder_sa2)\n",
    "    encoder_decoder_V = tf.matmul(tf.reshape(encoder_outputs,(-1,tf.shape(encoder_outputs)[2])),w_V_decoder_sa2)\n",
    "    \n",
    "    encoder_decoder_Q = tf.reshape(encoder_decoder_Q,(tf.shape(decoder_embedding_input)[0],tf.shape(decoder_embedding_input)[1],-1))\n",
    "    encoder_decoder_K = tf.reshape(encoder_decoder_K,(tf.shape(encoder_outputs)[0],tf.shape(encoder_outputs)[1],-1))\n",
    "    encoder_decoder_V = tf.reshape(encoder_decoder_V,(tf.shape(encoder_outputs)[0],tf.shape(encoder_outputs)[1],-1))\n",
    "          \n",
    "    # Head split/stack, as in the encoder block.\n",
    "    encoder_decoder_Q_split = tf.split(encoder_decoder_Q,2,axis=2)\n",
    "    encoder_decoder_K_split = tf.split(encoder_decoder_K,2,axis=2)\n",
    "    encoder_decoder_V_split = tf.split(encoder_decoder_V,2,axis=2)\n",
    "    \n",
    "    encoder_decoder_Q_concat = tf.concat(encoder_decoder_Q_split,axis=0)\n",
    "    encoder_decoder_K_concat = tf.concat(encoder_decoder_K_split,axis=0)\n",
    "    encoder_decoder_V_concat = tf.concat(encoder_decoder_V_split,axis=0)\n",
    "    \n",
    "    # No causal mask here: the decoder may attend to every encoder position.\n",
    "    encoder_decoder_attention_map_raw = tf.matmul(encoder_decoder_Q_concat,tf.transpose(encoder_decoder_K_concat,[0,2,1]))\n",
    "    encoder_decoder_attention_map = encoder_decoder_attention_map_raw / 8\n",
    "    \n",
    "    encoder_decoder_attention_map = tf.nn.softmax(encoder_decoder_attention_map)\n",
    "    \n",
    "    weightedSumV = tf.matmul(encoder_decoder_attention_map,encoder_decoder_V_concat)\n",
    "    \n",
    "    # Recombine heads, then project back to d_model.\n",
    "    encoder_decoder_outputs_z = tf.concat(tf.split(weightedSumV,2,axis=0),axis=2)\n",
    "    \n",
    "    encoder_decoder_outputs = tf.matmul(tf.reshape(encoder_decoder_outputs_z,(-1,tf.shape(encoder_decoder_outputs_z)[2])),w_Z_decoder_sa2)\n",
    "    \n",
    "    encoder_decoder_attention_outputs = tf.reshape(encoder_decoder_outputs,(tf.shape(decoder_embedding_input)[0],tf.shape(decoder_embedding_input)[1],-1))\n",
    "    \n",
    "    # Residual connection around the encoder-decoder attention sub-layer.\n",
    "    encoder_decoder_attention_outputs = encoder_decoder_attention_outputs + decoder_sa_outputs\n",
    "    \n",
    "    # todo :add BN\n",
    "    # NOTE(review): the Transformer uses layer normalization here, not BN.\n",
    "    W_f = tf.constant([[0.2,0.3,0.5,0.4],\n",
    "                       [0.2,0.3,0.5,0.4],\n",
    "                       [0.2,0.3,0.5,0.4],\n",
    "                       [0.2,0.3,0.5,0.4]])\n",
    "    \n",
    "    # Position-wise feed-forward + residual.\n",
    "    decoder_ffn_outputs = tf.matmul(tf.reshape(encoder_decoder_attention_outputs,(-1,tf.shape(encoder_decoder_attention_outputs)[2])),W_f)\n",
    "    decoder_ffn_outputs = tf.reshape(decoder_ffn_outputs,(tf.shape(encoder_decoder_attention_outputs)[0],tf.shape(encoder_decoder_attention_outputs)[1],-1))\n",
    "    \n",
    "    decoder_outputs = decoder_ffn_outputs + encoder_decoder_attention_outputs\n",
    "    # todo :add BN\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 174,
   "metadata": {},
   "outputs": [
    {
     "ename": "ValueError",
     "evalue": "No variables to optimize.",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-174-908640085cfd>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     14\u001b[0m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msoftmax_cross_entropy_with_logits\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlogits\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlogits\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mlabels\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0my\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     15\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mtrain_op\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAdamOptimizer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlearning_rate\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0.0001\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mminimize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m~/Anaconda/anaconda/envs/python3_5/lib/python3.5/site-packages/tensorflow/python/training/optimizer.py\u001b[0m in \u001b[0;36mminimize\u001b[0;34m(self, loss, global_step, var_list, gate_gradients, aggregation_method, colocate_gradients_with_ops, name, grad_loss)\u001b[0m\n\u001b[1;32m    341\u001b[0m         \u001b[0maggregation_method\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0maggregation_method\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    342\u001b[0m         \u001b[0mcolocate_gradients_with_ops\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcolocate_gradients_with_ops\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 343\u001b[0;31m         grad_loss=grad_loss)\n\u001b[0m\u001b[1;32m    344\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    345\u001b[0m     \u001b[0mvars_with_grad\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mv\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mgrads_and_vars\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mg\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/Anaconda/anaconda/envs/python3_5/lib/python3.5/site-packages/tensorflow/python/training/optimizer.py\u001b[0m in \u001b[0;36mcompute_gradients\u001b[0;34m(self, loss, var_list, gate_gradients, aggregation_method, colocate_gradients_with_ops, grad_loss)\u001b[0m\n\u001b[1;32m    406\u001b[0m     \u001b[0mprocessors\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0m_get_processor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mv\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mv\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mvar_list\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    407\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mvar_list\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 408\u001b[0;31m       \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"No variables to optimize.\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    409\u001b[0m     \u001b[0mvar_refs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtarget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mp\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mprocessors\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    410\u001b[0m     grads = gradients.gradients(\n",
      "\u001b[0;31mValueError\u001b[0m: No variables to optimize."
     ]
    }
   ],
   "source": [
    "# Final linear projection to vocabulary logits, then cross-entropy loss.\n",
    "W_final = tf.constant([[0.2,0.3,0.5,0.4],\n",
    "                       [0.2,0.3,0.5,0.4],\n",
    "                       [0.2,0.3,0.5,0.4],\n",
    "                       [0.2,0.3,0.5,0.4]])\n",
    "\n",
    "logits = tf.matmul(tf.reshape(decoder_outputs,(-1,tf.shape(decoder_outputs)[2])),W_final)\n",
    "logits = tf.reshape(logits,(tf.shape(decoder_outputs)[0],tf.shape(decoder_outputs)[1],-1))\n",
    "\n",
    "# BUG FIX: do NOT apply tf.nn.softmax to the logits here.\n",
    "# softmax_cross_entropy_with_logits expects raw (unnormalized) logits and\n",
    "# applies softmax internally; feeding it already-softmaxed probabilities\n",
    "# computes the wrong loss and wrong gradients (a double softmax).\n",
    "\n",
    "# Targets: the decoder input ids, one-hot over the 4-word vocabulary.\n",
    "y = tf.one_hot(decoder_input,depth=4)\n",
    "\n",
    "# Per-position cross-entropy, shape (batch, seq).\n",
    "loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=y)\n",
    "\n",
    "# NOTE(review): every weight in this walkthrough is a tf.constant, so the\n",
    "# graph has no trainable variables and AdamOptimizer(...).minimize(loss)\n",
    "# raises \"ValueError: No variables to optimize\" (see the recorded traceback).\n",
    "# Convert the w_* constants to tf.Variable before adding a train_op.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 175,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[ 1.74366832  0.74366832]\n",
      " [ 0.74366832  1.74366832]]\n"
     ]
    }
   ],
   "source": [
    "# Evaluate and print the per-token loss. All weights are tf.constants, so\n",
    "# no variable initializer is required before running the graph.\n",
    "# (Removed an unused `import numpy as np` — imports belong in the top cell.)\n",
    "with tf.Session() as sess:\n",
    "    print(sess.run(loss))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python35",
   "language": "python",
   "name": "python35"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
