{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports Cell\n",
    "from __future__ import print_function\n",
    "from tensorflow.keras.models import Model\n",
    "from tensorflow.keras.layers import Input, LSTM, Dense\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 超参数设置\n",
    "\n",
    "设置batch大小、训练重复次数、编码向量维度、数据路径等"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Basic hyperparameters for the seq2seq model.\n",
     "batch_size = 64  # Batch size for training.\n",
     "epochs = 10  # Number of epochs to train for.\n",
     "latent_dim = 256  # Latent dimensionality of the encoding space (LSTM units).\n",
     "num_samples = 10000  # Number of samples to train on.\n",
     "# Path to the data txt file on disk (tab-separated translation pairs).\n",
     "data_path = './ell.txt'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据的预处理\n",
    "\n",
    "从txt文件中读取数据；\n",
    "input_texts存储英文输入；\n",
     "target_texts存储翻译成希腊语的句子；\n",
    "input_characters表示英文中无重复的字符；\n",
    "target_characters表示希腊语中对应的无重复字符。\n",
    "\n",
     "解码的时候需要起始字符`<BOS>`和结束字符`<EOS>`，这里分别用制表符'\\t'和回车符'\\n'来表示"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Vectorize the data.\n",
    "input_texts = []\n",
    "target_texts = []\n",
    "input_characters = set()\n",
    "target_characters = set()\n",
    "with open(data_path, 'r', encoding='utf-8') as f:\n",
    "    lines = f.read().split('\\n')\n",
    "for line in lines[: min(num_samples, len(lines) - 1)]:\n",
    "    input_text, target_text, _ = line.split('\\t')\n",
    "    # We use \"tab\" as the \"start sequence\" character\n",
    "    # for the targets, and \"\\n\" as \"end sequence\" character.\n",
    "    target_text = '\\t' + target_text + '\\n'\n",
    "    input_texts.append(input_text)\n",
    "    target_texts.append(target_text)\n",
    "    for char in input_text:\n",
    "        if char not in input_characters:\n",
    "            input_characters.add(char)\n",
    "    for char in target_text:\n",
    "        if char not in target_characters:\n",
    "            target_characters.add(char)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "查看数据的特征，编码的对应维度是69，解码对应的维度是110"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Number of samples: 10000\n",
      "Number of unique input tokens: 69\n",
      "Number of unique output tokens: 110\n",
      "Max sequence length for inputs: 25\n",
      "Max sequence length for outputs: 49\n"
     ]
    }
   ],
   "source": [
    "input_characters = sorted(list(input_characters))\n",
    "target_characters = sorted(list(target_characters))\n",
    "num_encoder_tokens = len(input_characters)\n",
    "num_decoder_tokens = len(target_characters)\n",
    "max_encoder_seq_length = max([len(txt) for txt in input_texts])\n",
    "max_decoder_seq_length = max([len(txt) for txt in target_texts])\n",
    "\n",
    "input_token_index = dict(\n",
    "    [(char, i) for i, char in enumerate(input_characters)])\n",
    "target_token_index = dict(\n",
    "    [(char, i) for i, char in enumerate(target_characters)])\n",
    "\n",
    "print('Number of samples:', len(input_texts))\n",
    "print('Number of unique input tokens:', num_encoder_tokens)\n",
    "print('Number of unique output tokens:', num_decoder_tokens)\n",
    "print('Max sequence length for inputs:', max_encoder_seq_length)\n",
    "print('Max sequence length for outputs:', max_decoder_seq_length)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input_characters: [' ', '!', '%', \"'\", ',', '-', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', '?', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'R', 'S', 'T', 'U', 'V', 'W', 'Y', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '€']\n",
      "target_characters: ['\\t', '\\n', ' ', '!', '%', \"'\", ',', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '?', 'C', 'D', 'E', 'F', 'I', 'K', 'M', 'O', 'T', 'U', 'a', 'd', 'e', 'h', 'i', 'm', 'n', 'o', 'r', 't', '·', ';', '΄', 'Ά', 'Έ', 'Ή', 'Ί', 'Ό', 'Ώ', 'ΐ', 'Α', 'Β', 'Γ', 'Δ', 'Ε', 'Ζ', 'Η', 'Θ', 'Ι', 'Κ', 'Λ', 'Μ', 'Ν', 'Ξ', 'Ο', 'Π', 'Ρ', 'Σ', 'Τ', 'Υ', 'Φ', 'Χ', 'Ψ', 'ά', 'έ', 'ή', 'ί', 'α', 'β', 'γ', 'δ', 'ε', 'ζ', 'η', 'θ', 'ι', 'κ', 'λ', 'μ', 'ν', 'ξ', 'ο', 'π', 'ρ', 'ς', 'σ', 'τ', 'υ', 'φ', 'χ', 'ψ', 'ω', 'ϊ', 'ϋ', 'ό', 'ύ', 'ώ', 'ὠ']\n"
     ]
    }
   ],
   "source": [
    "print('input_characters:',input_characters)\n",
    "print('target_characters:',target_characters)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型输入\n",
    "\n",
    "设置编码、解码的数据维度\n",
    "\n",
    "编码输入维度：（输入长度，最大输入序列长度，编码字符集合个数）\n",
    "\n",
    "解码输入维度：（输入长度，最大输出序列长度，解码字符集合个数）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize Model Arrays\n",
    "encoder_input_data = np.zeros(\n",
    "    (len(input_texts), max_encoder_seq_length, num_encoder_tokens),\n",
    "    dtype='float32')\n",
    "decoder_input_data = np.zeros(\n",
    "    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),\n",
    "    dtype='float32')\n",
    "decoder_target_data = np.zeros(\n",
    "    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),\n",
    "    dtype='float32')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "装入对应的数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
     "# One-hot encode every (input, target) pair into the three tensors.\n",
     "for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):\n",
     "    for t, char in enumerate(input_text):\n",
     "        encoder_input_data[i, t, input_token_index[char]] = 1.\n",
     "    # Pad the remaining encoder timesteps with the space character.\n",
     "    encoder_input_data[i, t + 1:, input_token_index[' ']] = 1.\n",
     "    for t, char in enumerate(target_text):\n",
     "        # decoder_target_data is ahead of decoder_input_data by one timestep\n",
     "        decoder_input_data[i, t, target_token_index[char]] = 1.\n",
     "        if t > 0:\n",
     "            # decoder_target_data will be ahead by one timestep\n",
     "            # and will not include the start character.\n",
     "            decoder_target_data[i, t - 1, target_token_index[char]] = 1.\n",
     "    # Pad the tails with spaces. Note: `t` is now the last index of\n",
     "    # target_text; target padding starts at `t` (one step earlier than the\n",
     "    # input padding) because targets are shifted left by one timestep.\n",
     "    decoder_input_data[i, t + 1:, target_token_index[' ']] = 1.\n",
     "    decoder_target_data[i, t:, target_token_index[' ']] = 1."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "encoder_input_data: (10000, 25, 69)\n",
      "decoder_input_data: (10000, 49, 110)\n",
      "decoder_target_data: (10000, 49, 110)\n"
     ]
    }
   ],
   "source": [
    "print('encoder_input_data:',encoder_input_data.shape)\n",
    "print('decoder_input_data:',decoder_input_data.shape)\n",
    "print('decoder_target_data:',decoder_target_data.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define an input sequence and process it.\n",
    "encoder_inputs = Input(shape=(None, num_encoder_tokens))\n",
    "encoder = LSTM(latent_dim, return_state=True)\n",
    "encoder_outputs, state_h, state_c = encoder(encoder_inputs)\n",
    "# We discard `encoder_outputs` and only keep the states.\n",
    "encoder_states = [state_h, state_c]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Set up the decoder, using `encoder_states` as initial state.\n",
    "decoder_inputs = Input(shape=(None, num_decoder_tokens))\n",
    "\n",
    "# We set up our decoder to return full output sequences,\n",
    "# and to return internal states as well. We don't use the\n",
    "# return states in the training model, but we will use them in inference.\n",
    "decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)\n",
    "decoder_outputs, _, _ = decoder_lstm(decoder_inputs,\n",
    "                                     initial_state=encoder_states)\n",
    "decoder_dense = Dense(num_decoder_tokens, activation='softmax')\n",
    "decoder_outputs = decoder_dense(decoder_outputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 8000 samples, validate on 2000 samples\n",
      "Epoch 1/10\n",
      "8000/8000 [==============================] - 69s 9ms/sample - loss: 1.5498 - accuracy: 0.6326 - val_loss: 1.7987 - val_accuracy: 0.5402\n",
      "Epoch 2/10\n",
      "8000/8000 [==============================] - 67s 8ms/sample - loss: 1.2061 - accuracy: 0.6883 - val_loss: 1.4995 - val_accuracy: 0.5926\n",
      "Epoch 3/10\n",
      "8000/8000 [==============================] - 62s 8ms/sample - loss: 1.0012 - accuracy: 0.7326 - val_loss: 1.3102 - val_accuracy: 0.6387\n",
      "Epoch 4/10\n",
      "8000/8000 [==============================] - 63s 8ms/sample - loss: 0.8812 - accuracy: 0.7538 - val_loss: 1.2051 - val_accuracy: 0.6592\n",
      "Epoch 5/10\n",
      "8000/8000 [==============================] - 63s 8ms/sample - loss: 0.8107 - accuracy: 0.7700 - val_loss: 1.1377 - val_accuracy: 0.6777\n",
      "Epoch 6/10\n",
      "8000/8000 [==============================] - 63s 8ms/sample - loss: 0.7565 - accuracy: 0.7841 - val_loss: 1.0627 - val_accuracy: 0.6970\n",
      "Epoch 7/10\n",
      "8000/8000 [==============================] - 59s 7ms/sample - loss: 0.7083 - accuracy: 0.7962 - val_loss: 1.0445 - val_accuracy: 0.6986\n",
      "Epoch 8/10\n",
      "8000/8000 [==============================] - 59s 7ms/sample - loss: 0.6639 - accuracy: 0.8085 - val_loss: 0.9673 - val_accuracy: 0.7245\n",
      "Epoch 9/10\n",
      "8000/8000 [==============================] - 59s 7ms/sample - loss: 0.6242 - accuracy: 0.8195 - val_loss: 0.9362 - val_accuracy: 0.7339\n",
      "Epoch 10/10\n",
      "8000/8000 [==============================] - 55s 7ms/sample - loss: 0.5891 - accuracy: 0.8300 - val_loss: 0.9095 - val_accuracy: 0.7399\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<tensorflow.python.keras.callbacks.History at 0xb2d195240>"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Define the model that will turn\n",
    "# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\n",
    "model = Model([encoder_inputs, decoder_inputs], decoder_outputs)\n",
    "\n",
    "# Run training\n",
    "model.compile(optimizer='rmsprop', loss='categorical_crossentropy',\n",
    "              metrics=['accuracy'])\n",
    "model.fit([encoder_input_data, decoder_input_data], decoder_target_data,\n",
    "          batch_size=batch_size,\n",
    "          epochs=epochs,\n",
    "          validation_split=0.2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Persist the trained model (architecture + weights) to an HDF5 file.\n",
     "model.save('s2s.h5')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型使用"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "为什么下面这段代码又在构建模型，原因是seq2seq在训练和生成的时候并不完全相同；\n",
    "\n",
    "训练的时候，解码器是有预先输入的，我们会把正确的下句作为输入指导解码器进行学习，具体来说，不管上一个时刻解码器的输出是什么，我们都用预先给定的输入作为本时刻的输入；\n",
    "\n",
    "这种训练方式称为Teacher forcing\n",
    "\n",
    "但是在生成的时候，解码器是没有预先输入的，我们会把上一个时刻解码器的输出作为本时刻的输入，如此迭代的生成句子\n",
    "训练的时候我们的model是一整个seq2seq的模型，这个黑盒在给定encoder_input和decoder_input的情况下可以产生对应的输出\n",
    "但是生成时我们没有decoder_input，我们就把黑盒拆成两个黑盒，一个是编码器，一个是解码器，方便我们的操作。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Next: inference mode (sampling).\n",
    "# Here's the drill:\n",
    "# 1) encode input and retrieve initial decoder state\n",
    "# 2) run one step of decoder with this initial state\n",
    "# and a \"start of sequence\" token as target.\n",
    "# Output will be the next target token\n",
    "# 3) Repeat with the current target token and current states"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define sampling models\n",
    "# 第一个黑盒，编码器，给定encoder_inputs，得到encoder的状态\n",
    "encoder_model = Model(encoder_inputs, encoder_states)\n",
    "# 第二个黑盒，解码器\n",
    "# 解码器接受三个输入，两个是初始状态，一个是之前已经生成的文本\n",
    "decoder_state_input_h = Input(shape=(latent_dim,))\n",
    "decoder_state_input_c = Input(shape=(latent_dim,))\n",
    "decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\n",
    "# 解码器产生三个输出，两个当前状态，一个是每个时刻的输出，其中最后一个时刻的输出可以用来计算下一个字\n",
    "decoder_outputs, state_h, state_c = decoder_lstm(\n",
    "    decoder_inputs, initial_state=decoder_states_inputs)\n",
    "decoder_states = [state_h, state_c]\n",
    "decoder_outputs = decoder_dense(decoder_outputs)\n",
    "decoder_model = Model(\n",
    "    [decoder_inputs] + decoder_states_inputs,\n",
    "    [decoder_outputs] + decoder_states)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reverse-lookup token index to decode sequences back to\n",
    "# something readable.\n",
    "reverse_input_char_index = dict(\n",
    "    (i, char) for char, i in input_token_index.items())\n",
    "reverse_target_char_index = dict(\n",
    "    (i, char) for char, i in target_token_index.items())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "下述代码就实现了迭代的解码\n",
    "\n",
    "假设我们已经生成了前n个字，我们把前n个字作为输入，得到第n+1个字，再把这n+1个字作为输入，得到第n+2个字，以此类推"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
     "def decode_sequence(input_seq):\n",
     "    \"\"\"Greedily decode a single one-hot encoded input sequence.\n",
     "\n",
     "    input_seq: array of shape (1, timesteps, num_encoder_tokens)\n",
     "    (a one-sample slice of encoder_input_data).\n",
     "    Returns the decoded target string; it normally ends with '\\n'.\n",
     "    \"\"\"\n",
     "    # Encode the input as state vectors.\n",
     "    # The encoder's final states become the decoder's initial states.\n",
     "    states_value = encoder_model.predict(input_seq)\n",
     "\n",
     "    # Generate empty target sequence of length 1.\n",
     "    target_seq = np.zeros((1, 1, num_decoder_tokens))\n",
     "    # Populate the first character of target sequence with the start character.\n",
     "    # The very first decoder input is the start-of-sequence marker '\\t'.\n",
     "    target_seq[0, 0, target_token_index['\\t']] = 1.\n",
     "\n",
     "    # Sampling loop for a batch of sequences\n",
     "    # (to simplify, here we assume a batch of size 1).\n",
     "    stop_condition = False\n",
     "    decoded_sentence = ''\n",
     "    \n",
     "    # Iterative decoding: generate one character per loop iteration.\n",
     "    while not stop_condition:\n",
     "        # Feed the current decoder input and states into the decoder;\n",
     "        # get the next-character distribution and the updated states.\n",
     "        output_tokens, h, c = decoder_model.predict(\n",
     "            [target_seq] + states_value)\n",
     "\n",
     "        # Sample a token\n",
     "        # Greedy sampling: take the single most probable character.\n",
     "        sampled_token_index = np.argmax(output_tokens[0, -1, :])\n",
     "        sampled_char = reverse_target_char_index[sampled_token_index]\n",
     "        decoded_sentence += sampled_char\n",
     "\n",
     "        # Exit condition: either hit max length\n",
     "        # or find stop character.\n",
     "        if (sampled_char == '\\n' or\n",
     "           len(decoded_sentence) > max_decoder_seq_length):\n",
     "            stop_condition = True\n",
     "\n",
     "        # Update the target sequence (of length 1).\n",
     "        # The sampled character becomes the next decoder input.\n",
     "        target_seq = np.zeros((1, 1, num_decoder_tokens))\n",
     "        target_seq[0, 0, sampled_token_index] = 1.\n",
     "\n",
     "        # Update states\n",
     "        states_value = [h, c]\n",
     "\n",
     "    return decoded_sentence\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-\n",
      "Input sentence: Go.\n",
      "Decoded sentence: Είναι πολύ παλές.\n",
      "\n",
      "-\n",
      "Input sentence: Run!\n",
      "Decoded sentence: Σε θα το πορέσω.\n",
      "\n",
      "-\n",
      "Input sentence: Run!\n",
      "Decoded sentence: Σε θα το πορέσω.\n",
      "\n",
      "-\n",
      "Input sentence: Who?\n",
      "Decoded sentence: Πότε τον παραστου;\n",
      "\n",
      "-\n",
      "Input sentence: Wow!\n",
      "Decoded sentence: Πόστε τον Τομ;\n",
      "\n",
      "-\n",
      "Input sentence: Help!\n",
      "Decoded sentence: Θέλω να πορέσω.\n",
      "\n",
      "-\n",
      "Input sentence: Jump!\n",
      "Decoded sentence: Σε πορέσαν.\n",
      "\n",
      "-\n",
      "Input sentence: Hello!\n",
      "Decoded sentence: Θέλω να πορέσω.\n",
      "\n",
      "-\n",
      "Input sentence: Hurry!\n",
      "Decoded sentence: Σε θα με του μου.\n",
      "\n",
      "-\n",
      "Input sentence: I try.\n",
      "Decoded sentence: Σε θα μου είναι ευτυχισμένος.\n",
      "\n",
      "-\n",
      "Input sentence: I won!\n",
      "Decoded sentence: Θέλω να πορέσω.\n",
      "\n",
      "-\n",
      "Input sentence: I won!\n",
      "Decoded sentence: Θέλω να πορέσω.\n",
      "\n",
      "-\n",
      "Input sentence: Smile.\n",
      "Decoded sentence: Δεν είσαι ευτυχισμένος.\n",
      "\n",
      "-\n",
      "Input sentence: Attack!\n",
      "Decoded sentence: Μπορείς να το πορέσεις;\n",
      "\n",
      "-\n",
      "Input sentence: Cheers!\n",
      "Decoded sentence: Περες το καλά;\n",
      "\n",
      "-\n",
      "Input sentence: I fell.\n",
      "Decoded sentence: Θέλω να πορέσω.\n",
      "\n",
      "-\n",
      "Input sentence: I know.\n",
      "Decoded sentence: Μπορείς να σε ποραστώνη.\n",
      "\n",
      "-\n",
      "Input sentence: I left.\n",
      "Decoded sentence: Θέλω να πορέσω.\n",
      "\n",
      "-\n",
      "Input sentence: I lied.\n",
      "Decoded sentence: Θέλω να πορέσω.\n",
      "\n",
      "-\n",
      "Input sentence: I lost.\n",
      "Decoded sentence: Δε θα σε πορασένουμε.\n",
      "\n",
      "-\n",
      "Input sentence: I'm OK.\n",
      "Decoded sentence: Είναι πολύ παρές.\n",
      "\n",
      "-\n",
      "Input sentence: I'm OK.\n",
      "Decoded sentence: Είναι πολύ παρές.\n",
      "\n",
      "-\n",
      "Input sentence: I'm OK.\n",
      "Decoded sentence: Είναι πολύ παρές.\n",
      "\n",
      "-\n",
      "Input sentence: Listen.\n",
      "Decoded sentence: Το παραπαλένει.\n",
      "\n",
      "-\n",
      "Input sentence: Really?\n",
      "Decoded sentence: Θα σες στουμετήσες.\n",
      "\n",
      "-\n",
      "Input sentence: Thanks.\n",
      "Decoded sentence: Αυτό το βολύθε είναι καλύ.\n",
      "\n",
      "-\n",
      "Input sentence: Try it.\n",
      "Decoded sentence: Πες μου το πορέξει.\n",
      "\n",
      "-\n",
      "Input sentence: Try it.\n",
      "Decoded sentence: Πες μου το πορέξει.\n",
      "\n",
      "-\n",
      "Input sentence: We try.\n",
      "Decoded sentence: Σε θα μου είναι ευτυχισμένος.\n",
      "\n",
      "-\n",
      "Input sentence: We won.\n",
      "Decoded sentence: Θέλω να πορέσει.\n",
      "\n",
      "-\n",
      "Input sentence: We won.\n",
      "Decoded sentence: Θέλω να πορέσει.\n",
      "\n",
      "-\n",
      "Input sentence: Why me?\n",
      "Decoded sentence: Θα τε περούμενε;\n",
      "\n",
      "-\n",
      "Input sentence: Ask Tom.\n",
      "Decoded sentence: Ο Τομ είναι απολύ παλής.\n",
      "\n",
      "-\n",
      "Input sentence: Ask Tom.\n",
      "Decoded sentence: Ο Τομ είναι απολύ παλής.\n",
      "\n",
      "-\n",
      "Input sentence: Get out.\n",
      "Decoded sentence: Είναι πολύ παλές.\n",
      "\n",
      "-\n",
      "Input sentence: Get out.\n",
      "Decoded sentence: Είναι πολύ παλές.\n",
      "\n",
      "-\n",
      "Input sentence: Goodbye!\n",
      "Decoded sentence: Είναι πολύ παλή.\n",
      "\n",
      "-\n",
      "Input sentence: He came.\n",
      "Decoded sentence: Είναι πολύ παλές.\n",
      "\n",
      "-\n",
      "Input sentence: He runs.\n",
      "Decoded sentence: Τον είναι αυτό.\n",
      "\n",
      "-\n",
      "Input sentence: Help me!\n",
      "Decoded sentence: Σε θα με του συμένος.\n",
      "\n",
      "-\n",
      "Input sentence: Help us.\n",
      "Decoded sentence: Σε θα σε πορασένουμε.\n",
      "\n",
      "-\n",
      "Input sentence: Help us.\n",
      "Decoded sentence: Σε θα σε πορασένουμε.\n",
      "\n",
      "-\n",
      "Input sentence: Hit Tom.\n",
      "Decoded sentence: Ο σκυλίς είναι απόρος.\n",
      "\n",
      "-\n",
      "Input sentence: Hit Tom.\n",
      "Decoded sentence: Ο σκυλίς είναι απόρος.\n",
      "\n",
      "-\n",
      "Input sentence: Hug Tom.\n",
      "Decoded sentence: Είναι απο τη Βοστώνη.\n",
      "\n",
      "-\n",
      "Input sentence: Hug Tom.\n",
      "Decoded sentence: Είναι απο τη Βοστώνη.\n",
      "\n",
      "-\n",
      "Input sentence: I agree.\n",
      "Decoded sentence: Είναι πολύ παλούμενος.\n",
      "\n",
      "-\n",
      "Input sentence: I tried.\n",
      "Decoded sentence: Είναι πολύ παλές.\n",
      "\n",
      "-\n",
      "Input sentence: I'm Tom.\n",
      "Decoded sentence: Είμαι α συπότη στη Μαίρη.\n",
      "\n",
      "-\n",
      "Input sentence: I'm shy.\n",
      "Decoded sentence: Είμαι πολύ παρές.\n",
      "\n",
      "-\n",
      "Input sentence: I'm shy.\n",
      "Decoded sentence: Είμαι πολύ παρές.\n",
      "\n",
      "-\n",
      "Input sentence: Me, too.\n",
      "Decoded sentence: Πετε το μπορί του είναι του Τομ.\n",
      "\n",
      "-\n",
      "Input sentence: Open up.\n",
      "Decoded sentence: Μπορείνε να με το πορέσω.\n",
      "\n",
      "-\n",
      "Input sentence: Perfect!\n",
      "Decoded sentence: Πες μου το πουτά.\n",
      "\n",
      "-\n",
      "Input sentence: See you!\n",
      "Decoded sentence: Είστε απολύ.\n",
      "\n",
      "-\n",
      "Input sentence: See you.\n",
      "Decoded sentence: Είστε απολύ.\n",
      "\n",
      "-\n",
      "Input sentence: Tom ate.\n",
      "Decoded sentence: Ο Τομ είναι απορό τη Μαίρη.\n",
      "\n",
      "-\n",
      "Input sentence: Tom ran.\n",
      "Decoded sentence: Ο Τομ είναι απορό τη Μαίρη.\n",
      "\n",
      "-\n",
      "Input sentence: Tom won.\n",
      "Decoded sentence: Ο Τομ είναι απορό τη Μαίρη.\n",
      "\n",
      "-\n",
      "Input sentence: Wash up.\n",
      "Decoded sentence: Πού είναι αυτό;\n",
      "\n",
      "-\n",
      "Input sentence: We lost.\n",
      "Decoded sentence: Θέλω να πορέσω.\n",
      "\n",
      "-\n",
      "Input sentence: Welcome.\n",
      "Decoded sentence: Θέλω να πορεύσε το καρέμα.\n",
      "\n",
      "-\n",
      "Input sentence: Welcome.\n",
      "Decoded sentence: Θέλω να πορεύσε το καρέμα.\n",
      "\n",
      "-\n",
      "Input sentence: Who ate?\n",
      "Decoded sentence: Πού είναι το καλά;\n",
      "\n",
      "-\n",
      "Input sentence: Who won?\n",
      "Decoded sentence: Ποιος σας είναι το καλά;\n",
      "\n",
      "-\n",
      "Input sentence: Who won?\n",
      "Decoded sentence: Ποιος σας είναι το καλά;\n",
      "\n",
      "-\n",
      "Input sentence: Why not?\n",
      "Decoded sentence: Σας αρέσει το καρί;\n",
      "\n",
      "-\n",
      "Input sentence: You won.\n",
      "Decoded sentence: Σε θα σε πορασένουμε.\n",
      "\n",
      "-\n",
      "Input sentence: You won.\n",
      "Decoded sentence: Σε θα σε πορασένουμε.\n",
      "\n",
      "-\n",
      "Input sentence: You won.\n",
      "Decoded sentence: Σε θα σε πορασένουμε.\n",
      "\n",
      "-\n",
      "Input sentence: You won.\n",
      "Decoded sentence: Σε θα σε πορασένουμε.\n",
      "\n",
      "-\n",
      "Input sentence: Am I fat?\n",
      "Decoded sentence: Ήσαστε ευτυχισμένος;\n",
      "\n",
      "-\n",
      "Input sentence: Am I fat?\n",
      "Decoded sentence: Ήσαστε ευτυχισμένος;\n",
      "\n",
      "-\n",
      "Input sentence: Find Tom.\n",
      "Decoded sentence: Πείνε το μπαρί του Τομ.\n",
      "\n",
      "-\n",
      "Input sentence: Find Tom.\n",
      "Decoded sentence: Πείνε το μπαρί του Τομ.\n",
      "\n",
      "-\n",
      "Input sentence: Grab him.\n",
      "Decoded sentence: Είστε απολύ.\n",
      "\n",
      "-\n",
      "Input sentence: Grab him.\n",
      "Decoded sentence: Είστε απολύ.\n",
      "\n",
      "-\n",
      "Input sentence: Grab him.\n",
      "Decoded sentence: Είστε απολύ.\n",
      "\n",
      "-\n",
      "Input sentence: Have fun.\n",
      "Decoded sentence: Το παστένα.\n",
      "\n",
      "-\n",
      "Input sentence: He tries.\n",
      "Decoded sentence: Είναι πολύ παλές.\n",
      "\n",
      "-\n",
      "Input sentence: Help Tom.\n",
      "Decoded sentence: Δε θα σε πορασένουμε.\n",
      "\n",
      "-\n",
      "Input sentence: Help Tom.\n",
      "Decoded sentence: Δε θα σε πορασένουμε.\n",
      "\n",
      "-\n",
      "Input sentence: Hi, guys.\n",
      "Decoded sentence: Τον είναι ευτυχισμένος;\n",
      "\n",
      "-\n",
      "Input sentence: How cute!\n",
      "Decoded sentence: Του είναι ευτυχισμένος;\n",
      "\n",
      "-\n",
      "Input sentence: How deep?\n",
      "Decoded sentence: Πού είναι αυτό;\n",
      "\n",
      "-\n",
      "Input sentence: Hurry up.\n",
      "Decoded sentence: Σε θα με του μου.\n",
      "\n",
      "-\n",
      "Input sentence: I agreed.\n",
      "Decoded sentence: Είναι πολύ παλούμενος.\n",
      "\n",
      "-\n",
      "Input sentence: I failed.\n",
      "Decoded sentence: Σε θα μου είναι ευτυχισμένος.\n",
      "\n",
      "-\n",
      "Input sentence: I forgot.\n",
      "Decoded sentence: Τον είναι αυτό.\n",
      "\n",
      "-\n",
      "Input sentence: I refuse.\n",
      "Decoded sentence: Είναι πολύ παλούμενος.\n",
      "\n",
      "-\n",
      "Input sentence: I resign.\n",
      "Decoded sentence: Είναι πολύ παλές.\n",
      "\n",
      "-\n",
      "Input sentence: I smiled.\n",
      "Decoded sentence: Είναι πολύ παρα.\n",
      "\n",
      "-\n",
      "Input sentence: I stayed.\n",
      "Decoded sentence: Τον είναι αυτό;\n",
      "\n",
      "-\n",
      "Input sentence: I use it.\n",
      "Decoded sentence: Είναι πολύ παρασένος.\n",
      "\n",
      "-\n",
      "Input sentence: I waited.\n",
      "Decoded sentence: Σε θα με πολύσω.\n",
      "\n",
      "-\n",
      "Input sentence: I'll pay.\n",
      "Decoded sentence: Θα σε συμχωστώ.\n",
      "\n",
      "-\n",
      "Input sentence: I'm busy.\n",
      "Decoded sentence: Είμαι πολύ παρές.\n",
      "\n",
      "-\n",
      "Input sentence: I'm busy.\n",
      "Decoded sentence: Είμαι πολύ παρές.\n",
      "\n",
      "-\n",
      "Input sentence: I'm cold.\n",
      "Decoded sentence: Είμαι πολύ παρές.\n",
      "\n",
      "-\n",
      "Input sentence: I'm fine.\n",
      "Decoded sentence: Είναι πολύ παρές.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "for seq_index in range(100):\n",
    "    # Take one sequence (part of the training set)\n",
    "    # for trying out decoding.\n",
    "    input_seq = encoder_input_data[seq_index: seq_index + 1]\n",
    "    decoded_sentence = decode_sequence(input_seq)\n",
    "    print('-')\n",
    "    print('Input sentence:', input_texts[seq_index])\n",
    "    print('Decoded sentence:', decoded_sentence)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "hide_input": false,
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  },
  "latex_envs": {
   "LaTeX_envs_menu_present": true,
   "autoclose": true,
   "autocomplete": true,
   "bibliofile": "biblio.bib",
   "cite_by": "apalike",
   "current_citInitial": 1,
   "eqLabelWithNumbers": true,
   "eqNumInitial": 1,
   "hotkeys": {
    "equation": "Ctrl-E",
    "itemize": "Ctrl-I"
   },
   "labels_anchors": false,
   "latex_user_defs": false,
   "report_style_numbering": false,
   "user_envs_cfg": false
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {
    "height": "calc(100% - 180px)",
    "left": "10px",
    "top": "150px",
    "width": "280px"
   },
   "toc_section_display": true,
   "toc_window_display": true
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
