{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.1.0\n",
      "sys.version_info(major=3, minor=6, micro=9, releaselevel='final', serial=0)\n",
      "matplotlib 3.1.2\n",
      "numpy 1.18.1\n",
      "pandas 0.25.3\n",
      "sklearn 0.22.1\n",
      "tensorflow 2.1.0\n",
      "tensorflow_core.keras 2.2.4-tf\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'\\n在本例中，要实现文本预测\\n训练的数据集来自莎士比亚剧本\\n\\n简单流程：\\n1. 处理数据\\n将char 转换为 int，字符生成id\\n将id 转换为 char, 将id生成肉眼可识别的char\\n\\n2. 创建模型\\n\\n3. 使用模型在给定的起始字符串之后，预测出之后的字符串\\n\\n其实在模型中，传递的数据类型都是int型，在与人交互时，才从id值转换为char\\n\\n'"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\n",
    "# Imports and an environment/version report for this notebook.\n",
    "import matplotlib as mpl\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "import numpy as np\n",
    "import sklearn\n",
    "import pandas as pd\n",
    "import os\n",
    "import sys\n",
    "import time\n",
    "import tensorflow as tf\n",
    "\n",
    "from tensorflow import keras\n",
    "\n",
    "# Print TF/Python versions and the version of each imported library\n",
    "# so the run environment is recorded alongside the results.\n",
    "print(tf.__version__)\n",
    "print(sys.version_info)\n",
    "for module in mpl, np, pd, sklearn, tf, keras:\n",
    "    print(module.__name__, module.__version__)\n",
    "    \n",
    "    \n",
    "# The string below is a bare string literal (its value is the cell output),\n",
    "# so it is kept verbatim. It summarizes the notebook: char-level text\n",
    "# prediction trained on Shakespeare — build char<=>id maps, build a model,\n",
    "# then generate text from a seed string; ids are used internally, chars\n",
    "# only at the human-facing boundary.\n",
    "'''\n",
    "在本例中，要实现文本预测\n",
    "训练的数据集来自莎士比亚剧本\n",
    "\n",
    "简单流程：\n",
    "1. 处理数据\n",
    "将char 转换为 int，字符生成id\n",
    "将id 转换为 char, 将id生成肉眼可识别的char\n",
    "\n",
    "2. 创建模型\n",
    "\n",
    "3. 使用模型在给定的起始字符串之后，预测出之后的字符串\n",
    "\n",
    "其实在模型中，传递的数据类型都是int型，在与人交互时，才从id值转换为char\n",
    "\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1115394\n",
      "First Citizen:\n",
      "Before we proceed any further, hear me speak.\n",
      "\n",
      "All:\n",
      "Speak, speak.\n",
      "\n",
      "First Citizen:\n",
      "You\n"
     ]
    }
   ],
   "source": [
    "# https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt\n",
    "# Load the training text (Shakespeare play dialogue).\n",
    "input_filepath = \"./shakespeare.txt\"\n",
    "# Read the raw text straight from the file; it includes punctuation,\n",
    "# newlines, etc. Use a context manager so the file handle is closed\n",
    "# promptly instead of being leaked until garbage collection.\n",
    "with open(input_filepath, 'r') as f:\n",
    "    text = f.read()\n",
    "\n",
    "# Show basic info about the data\n",
    "print(len(text))\n",
    "print(text[0:100])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "65\n",
      "['\\n', ' ', '!', '$', '&', \"'\", ',', '-', '.', '3', ':', ';', '?', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n"
     ]
    }
   ],
   "source": [
    "# 1. generate vocab\n",
    "# 2. build mapping char->id\n",
    "# 3. data -> id_data\n",
    "# 4. abcd -> bcd<eos>\n",
    "\n",
    "\n",
    "# Build the vocabulary; it underpins the char <=> int mapping.\n",
    "# set() extracts the unique characters from the string; sorted() fixes their order.\n",
    "vocab = sorted(set(text))\n",
    "# The vocabulary holds only 65 distinct characters (see output).\n",
    "print(len(vocab))\n",
    "print(vocab)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'\\n': 0, ' ': 1, '!': 2, '$': 3, '&': 4, \"'\": 5, ',': 6, '-': 7, '.': 8, '3': 9, ':': 10, ';': 11, '?': 12, 'A': 13, 'B': 14, 'C': 15, 'D': 16, 'E': 17, 'F': 18, 'G': 19, 'H': 20, 'I': 21, 'J': 22, 'K': 23, 'L': 24, 'M': 25, 'N': 26, 'O': 27, 'P': 28, 'Q': 29, 'R': 30, 'S': 31, 'T': 32, 'U': 33, 'V': 34, 'W': 35, 'X': 36, 'Y': 37, 'Z': 38, 'a': 39, 'b': 40, 'c': 41, 'd': 42, 'e': 43, 'f': 44, 'g': 45, 'h': 46, 'i': 47, 'j': 48, 'k': 49, 'l': 50, 'm': 51, 'n': 52, 'o': 53, 'p': 54, 'q': 55, 'r': 56, 's': 57, 't': 58, 'u': 59, 'v': 60, 'w': 61, 'x': 62, 'y': 63, 'z': 64}\n"
     ]
    }
   ],
   "source": [
    "# Invert the vocabulary so an id can be looked up from a char.\n",
    "char2idx = {char:idx for idx, char in enumerate(vocab)}\n",
    "print(char2idx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['\\n' ' ' '!' '$' '&' \"'\" ',' '-' '.' '3' ':' ';' '?' 'A' 'B' 'C' 'D' 'E'\n",
      " 'F' 'G' 'H' 'I' 'J' 'K' 'L' 'M' 'N' 'O' 'P' 'Q' 'R' 'S' 'T' 'U' 'V' 'W'\n",
      " 'X' 'Y' 'Z' 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o'\n",
      " 'p' 'q' 'r' 's' 't' 'u' 'v' 'w' 'x' 'y' 'z']\n"
     ]
    }
   ],
   "source": [
    "# Convert the vocab list to a numpy array for convenient (fancy) indexing.\n",
    "# Summary: at this point there are two lookup tables:\n",
    "# 1. id=>char   idx2char\n",
    "# 2. char=>id   char2idx\n",
    "idx2char = np.array(vocab)\n",
    "print(idx2char)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[18 47 56 57 58  1 15 47 58 47]\n",
      "First Citi\n"
     ]
    }
   ],
   "source": [
    "# With the vocabulary built, convert the text data to id data.\n",
    "# The result is one long 1-D vector recording every character of the play\n",
    "# as an id, so its length equals the number of characters in the text\n",
    "# (including newlines, spaces and other special characters).\n",
    "text_as_int = np.array([char2idx[c] for c in text])\n",
    "print(text_as_int[0:10])\n",
    "print(text[0:10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(18, shape=(), dtype=int64) F\n",
      "tf.Tensor(47, shape=(), dtype=int64) i\n",
      "tf.Tensor(\n",
      "[18 47 56 57 58  1 15 47 58 47 64 43 52 10  0 14 43 44 53 56 43  1 61 43\n",
      "  1 54 56 53 41 43 43 42  1 39 52 63  1 44 59 56 58 46 43 56  6  1 46 43\n",
      " 39 56  1 51 43  1 57 54 43 39 49  8  0  0 13 50 50 10  0 31 54 43 39 49\n",
      "  6  1 57 54 43 39 49  8  0  0 18 47 56 57 58  1 15 47 58 47 64 43 52 10\n",
      "  0 37 53 59  1], shape=(101,), dtype=int64)\n",
      "'First Citizen:\\nBefore we proceed any further, hear me speak.\\n\\nAll:\\nSpeak, speak.\\n\\nFirst Citizen:\\nYou '\n",
      "tf.Tensor(\n",
      "[39 56 43  1 39 50 50  1 56 43 57 53 50 60 43 42  1 56 39 58 46 43 56  1\n",
      " 58 53  1 42 47 43  1 58 46 39 52  1 58 53  1 44 39 51 47 57 46 12  0  0\n",
      " 13 50 50 10  0 30 43 57 53 50 60 43 42  8  1 56 43 57 53 50 60 43 42  8\n",
      "  0  0 18 47 56 57 58  1 15 47 58 47 64 43 52 10  0 18 47 56 57 58  6  1\n",
      " 63 53 59  1 49], shape=(101,), dtype=int64)\n",
      "'are all resolved rather to die than to famish?\\n\\nAll:\\nResolved. resolved.\\n\\nFirst Citizen:\\nFirst, you k'\n"
     ]
    }
   ],
   "source": [
    "# Wrap the id-encoded text in a tf.data.Dataset for batch-style processing.\n",
    "char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)\n",
    "\n",
    "\n",
    "\n",
    "# Sequence length: each training sentence contains 100 characters.\n",
    "seq_length = 100\n",
    "# Batch the character stream into fixed-length sequences.\n",
    "# The data format becomes [number_of_sequences, 101].\n",
    "# Why 101? Each sequence is later split into input and target data:\n",
    "# e.g. for 'abcde' the input is 'abcd' and the target is 'bcde',\n",
    "# so one extra character is kept per sequence.\n",
    "seq_dataset = char_dataset.batch(seq_length + 1,\n",
    "                                 # drop_remainder drops the final batch that is smaller than the batch size\n",
    "                                 drop_remainder = True)\n",
    "\n",
    "for ch_id in char_dataset.take(2):\n",
    "    # Passing an array inside idx2char[] selects the chars at those indices\n",
    "    # (numpy fancy indexing).\n",
    "    print(ch_id, idx2char[ch_id.numpy()])\n",
    "\n",
    "# Full sentences appear because every character in the text — including\n",
    "# spaces and newlines — was assigned an id.\n",
    "# Display data from seq_dataset (look up each id's character in the table).\n",
    "for seq_id in seq_dataset.take(2):\n",
    "    print(seq_id)\n",
    "    # repr() makes special characters such as \\n visible\n",
    "    print(repr(''.join(idx2char[seq_id.numpy()])))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[18 47 56 57 58  1 15 47 58 47 64 43 52 10  0 14 43 44 53 56 43  1 61 43\n",
      "  1 54 56 53 41 43 43 42  1 39 52 63  1 44 59 56 58 46 43 56  6  1 46 43\n",
      " 39 56  1 51 43  1 57 54 43 39 49  8  0  0 13 50 50 10  0 31 54 43 39 49\n",
      "  6  1 57 54 43 39 49  8  0  0 18 47 56 57 58  1 15 47 58 47 64 43 52 10\n",
      "  0 37 53 59]\n",
      "[47 56 57 58  1 15 47 58 47 64 43 52 10  0 14 43 44 53 56 43  1 61 43  1\n",
      " 54 56 53 41 43 43 42  1 39 52 63  1 44 59 56 58 46 43 56  6  1 46 43 39\n",
      " 56  1 51 43  1 57 54 43 39 49  8  0  0 13 50 50 10  0 31 54 43 39 49  6\n",
      "  1 57 54 43 39 49  8  0  0 18 47 56 57 58  1 15 47 58 47 64 43 52 10  0\n",
      " 37 53 59  1]\n",
      "[39 56 43  1 39 50 50  1 56 43 57 53 50 60 43 42  1 56 39 58 46 43 56  1\n",
      " 58 53  1 42 47 43  1 58 46 39 52  1 58 53  1 44 39 51 47 57 46 12  0  0\n",
      " 13 50 50 10  0 30 43 57 53 50 60 43 42  8  1 56 43 57 53 50 60 43 42  8\n",
      "  0  0 18 47 56 57 58  1 15 47 58 47 64 43 52 10  0 18 47 56 57 58  6  1\n",
      " 63 53 59  1]\n",
      "[56 43  1 39 50 50  1 56 43 57 53 50 60 43 42  1 56 39 58 46 43 56  1 58\n",
      " 53  1 42 47 43  1 58 46 39 52  1 58 53  1 44 39 51 47 57 46 12  0  0 13\n",
      " 50 50 10  0 30 43 57 53 50 60 43 42  8  1 56 43 57 53 50 60 43 42  8  0\n",
      "  0 18 47 56 57 58  1 15 47 58 47 64 43 52 10  0 18 47 56 57 58  6  1 63\n",
      " 53 59  1 49]\n"
     ]
    }
   ],
   "source": [
    "# Split one sequence of characters into input data and output data; returns a tuple.\n",
    "# I.e. split out input and label.\n",
    "def split_input_target(id_text):\n",
    "    \"\"\"\n",
    "    abcde -> abcd, bcde\n",
    "    \"\"\"\n",
    "    # Returned format: (list1, list2)\n",
    "    return id_text[0:-1], id_text[1:]\n",
    "\n",
    "# Use the dataset's map method to feed every element through split_input_target.\n",
    "# Each dataset item becomes a tuple.\n",
    "# So far seq_dataset is a Dataset whose items look roughly like:\n",
    "# [\n",
    "#     # each item\n",
    "#     (\n",
    "#         [input ids],\n",
    "#         [target ids]\n",
    "#     )\n",
    "    \n",
    "# ]\n",
    "\n",
    "\n",
    "seq_dataset = seq_dataset.map(split_input_target)\n",
    "\n",
    "for item_input, item_output in seq_dataset.take(2):\n",
    "    print(item_input.numpy())\n",
    "    print(item_output.numpy())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(64, 100)\n",
      "(64, 100)\n"
     ]
    }
   ],
   "source": [
    "batch_size = 64\n",
    "buffer_size = 10000\n",
    "\n",
    "\n",
    "# Shuffle, then split the data with batch_size (batching adds one more dimension).\n",
    "# Each item inside a batch has the format:\n",
    "# [\n",
    "#     # each item\n",
    "#     (\n",
    "#         [input ids],\n",
    "#         [target ids]\n",
    "#     )\n",
    "    \n",
    "# ]\n",
    "seq_dataset = seq_dataset.shuffle(buffer_size).batch(\n",
    "    # drop_remainder drops the final batch smaller than batch_size.\n",
    "    # Previously this was unnecessary because repeat() supplied unlimited data.\n",
    "    batch_size, drop_remainder=True)\n",
    "\n",
    "\n",
    "for item_input, item_output in seq_dataset.take(1):\n",
    "    print(item_input.shape)\n",
    "    print(item_output.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential_6\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "embedding_6 (Embedding)      (64, None, 256)           16640     \n",
      "_________________________________________________________________\n",
      "simple_rnn_5 (SimpleRNN)     (64, None, 1024)          1311744   \n",
      "_________________________________________________________________\n",
      "dense_5 (Dense)              (64, None, 65)            66625     \n",
      "=================================================================\n",
      "Total params: 1,395,009\n",
      "Trainable params: 1,395,009\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "# Size of the character vocabulary\n",
    "vocab_size = len(vocab)\n",
    "# Length of the embedding vector for each char\n",
    "embedding_dim = 256\n",
    "# Number of RNN units\n",
    "rnn_units = 1024\n",
    "\n",
    "# Build the char-level language model.\n",
    "def build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n",
    "    model = keras.models.Sequential([\n",
    "        # Embedding's role: turn positive integers (indices) into fixed-size dense vectors.\n",
    "        # Each char index in a sentence (its position in the vocab, i.e. the char=>int\n",
    "        # mapping) is converted into a fixed-length dense vector.\n",
    "        keras.layers.Embedding(vocab_size, embedding_dim,\n",
    "                               # Specify the input batch_size explicitly so the RNN's\n",
    "                               # \"stateful\" feature can be used.\n",
    "                               # Input data format is [batch_size, None].\n",
    "                               batch_input_shape = [batch_size,None]\n",
    "                              ),\n",
    "        \n",
    "        # RNN layer.\n",
    "        # To use statefulness in the RNN, the following conditions must hold:\n",
    "#         1. Explicitly pass batch_size to the model's first layer.\n",
    "#         2. Set stateful = True on the RNN layer.\n",
    "#         3. Pass shuffle = False when calling fit() (False is the default).\n",
    "        \n",
    "        keras.layers.SimpleRNN(units = rnn_units,\n",
    "                               # Informally: the model trains one sentence at a time, but\n",
    "                               # carries the previous sentence's final state over as this\n",
    "                               # sentence's initial state, linking consecutive sentences.\n",
    "                               stateful = True,\n",
    "                               recurrent_initializer = 'glorot_uniform',\n",
    "                               # return_sequences: if True the layer returns the full output\n",
    "                               # sequence (one output per timestep, a 2-D result per sample)\n",
    "                               # rather than only the last output.\n",
    "                               # Last-output-only would yield one embedding-sized vector per\n",
    "                               # sample, i.e. a [batch_size, ...] result.\n",
    "                               # \n",
    "                               return_sequences = True\n",
    "                              ),\n",
    "        \n",
    "#         # Dense layer sized to the vocabulary, with no activation:\n",
    "#         # per position, scores (logits) for the next character over the vocab.\n",
    "        keras.layers.Dense(vocab_size)\n",
    "    ])\n",
    "    # Return the model\n",
    "    return model\n",
    "\n",
    "# Call the function to create the model.\n",
    "model = build_model(\n",
    "    vocab_size = vocab_size,\n",
    "    embedding_dim = embedding_dim,\n",
    "    rnn_units = rnn_units,\n",
    "    batch_size = batch_size)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "model.summary()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Model was constructed with shape Tensor(\"embedding_6_input:0\", shape=(64, None), dtype=float32) for input (64, None), but it was re-called on a Tensor with incompatible shape (1, 3).\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "Tensor's shape (3, 64, 1024) is not compatible with supplied shape [3, 1, 1024]",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-46-f06aa0e6cbf2>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      5\u001b[0m \u001b[0;31m# 进行维度扩展，在当前维度前，再加一个维度\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      6\u001b[0m \u001b[0minput_eval\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexpand_dims\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_eval\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0mpred_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_eval\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      8\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpred_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, inputs, *args, **kwargs)\u001b[0m\n\u001b[1;32m    820\u001b[0m           with base_layer_utils.autocast_context_manager(\n\u001b[1;32m    821\u001b[0m               self._compute_dtype):\n\u001b[0;32m--> 822\u001b[0;31m             \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcall\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcast_inputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    823\u001b[0m           \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_handle_activity_regularization\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    824\u001b[0m           \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_set_mask_metadata\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_masks\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, training, mask)\u001b[0m\n\u001b[1;32m    265\u001b[0m       \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbuilt\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    266\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_init_graph_network\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 267\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mSequential\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcall\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtraining\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtraining\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmask\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    268\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    269\u001b[0m     \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0minputs\u001b[0m  \u001b[0;31m# handle the corner case where self.layers is empty\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/network.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, training, mask)\u001b[0m\n\u001b[1;32m    715\u001b[0m     return self._run_internal_graph(\n\u001b[1;32m    716\u001b[0m         \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtraining\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtraining\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmask\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 717\u001b[0;31m         convert_kwargs_to_constants=base_layer_utils.call_context().saving)\n\u001b[0m\u001b[1;32m    718\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    719\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0mcompute_output_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/network.py\u001b[0m in \u001b[0;36m_run_internal_graph\u001b[0;34m(self, inputs, training, mask, convert_kwargs_to_constants)\u001b[0m\n\u001b[1;32m    889\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    890\u001b[0m           \u001b[0;31m# Compute outputs.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 891\u001b[0;31m           \u001b[0moutput_tensors\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcomputed_tensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    892\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    893\u001b[0m           \u001b[0;31m# Update tensor_dict.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/recurrent.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, inputs, initial_state, constants, **kwargs)\u001b[0m\n\u001b[1;32m    642\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    643\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0minitial_state\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mconstants\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 644\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mRNN\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    645\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    646\u001b[0m     \u001b[0;31m# If any of `initial_state` or `constants` are specified and are Keras\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, inputs, *args, **kwargs)\u001b[0m\n\u001b[1;32m    820\u001b[0m           with base_layer_utils.autocast_context_manager(\n\u001b[1;32m    821\u001b[0m               self._compute_dtype):\n\u001b[0;32m--> 822\u001b[0;31m             \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcall\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcast_inputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    823\u001b[0m           \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_handle_activity_regularization\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    824\u001b[0m           \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_set_mask_metadata\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_masks\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/recurrent.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, mask, training, initial_state)\u001b[0m\n\u001b[1;32m   1523\u001b[0m     \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_maybe_reset_cell_dropout_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcell\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1524\u001b[0m     return super(SimpleRNN, self).call(\n\u001b[0;32m-> 1525\u001b[0;31m         inputs, mask=mask, training=training, initial_state=initial_state)\n\u001b[0m\u001b[1;32m   1526\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1527\u001b[0m   \u001b[0;34m@\u001b[0m\u001b[0mproperty\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/recurrent.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, mask, training, initial_state, constants)\u001b[0m\n\u001b[1;32m    780\u001b[0m         \u001b[0minput_length\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mrow_lengths\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrow_lengths\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mtimesteps\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    781\u001b[0m         \u001b[0mtime_major\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime_major\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 782\u001b[0;31m         zero_output_for_mask=self.zero_output_for_mask)\n\u001b[0m\u001b[1;32m    783\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    784\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstateful\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/keras/backend.py\u001b[0m in \u001b[0;36mrnn\u001b[0;34m(step_function, inputs, initial_states, go_backwards, mask, constants, unroll, input_length, time_major, zero_output_for_mask)\u001b[0m\n\u001b[1;32m   4192\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0moutput_\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   4193\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4194\u001b[0;31m   \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmap_structure\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mset_shape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   4195\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   4196\u001b[0m   \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mtime_major\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/util/nest.py\u001b[0m in \u001b[0;36mmap_structure\u001b[0;34m(func, *structure, **kwargs)\u001b[0m\n\u001b[1;32m    566\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    567\u001b[0m   return pack_sequence_as(\n\u001b[0;32m--> 568\u001b[0;31m       \u001b[0mstructure\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mx\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mentries\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    569\u001b[0m       expand_composites=expand_composites)\n\u001b[1;32m    570\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/util/nest.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m    566\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    567\u001b[0m   return pack_sequence_as(\n\u001b[0;32m--> 568\u001b[0;31m       \u001b[0mstructure\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mx\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mentries\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    569\u001b[0m       expand_composites=expand_composites)\n\u001b[1;32m    570\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/keras/backend.py\u001b[0m in \u001b[0;36mset_shape\u001b[0;34m(output_)\u001b[0m\n\u001b[1;32m   4189\u001b[0m       \u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtime_steps\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   4190\u001b[0m       \u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbatch\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4191\u001b[0;31m       \u001b[0moutput_\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   4192\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0moutput_\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   4193\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/environment/tf_py3/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py\u001b[0m in \u001b[0;36mset_shape\u001b[0;34m(self, shape)\u001b[0m\n\u001b[1;32m   1086\u001b[0m       raise ValueError(\n\u001b[1;32m   1087\u001b[0m           \u001b[0;34m\"Tensor's shape %s is not compatible with supplied shape %s\"\u001b[0m \u001b[0;34m%\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1088\u001b[0;31m           (self.shape, shape))\n\u001b[0m\u001b[1;32m   1089\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1090\u001b[0m   \u001b[0;31m# Methods not supported / implemented for Eager Tensors.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mValueError\u001b[0m: Tensor's shape (3, 64, 1024) is not compatible with supplied shape [3, 1, 1024]"
     ]
    }
   ],
   "source": [
    "# NOTE(review): this cell raises ValueError (see traceback below). The model\n",
    "# holds a stateful SimpleRNN built with batch_size=64, and model.build with\n",
    "# shape [1, None] does not reshape the RNN's stored state — hence\n",
    "# \"(3, 64, 1024) is not compatible with supplied shape [3, 1, 1024]\".\n",
    "# The usual fix is to rebuild with build_model(..., batch_size=1) and load\n",
    "# the trained weights into the new model — TODO confirm.\n",
    "model.build(tf.TensorShape([1,None]))  \n",
    "start_string=\"asd\"\n",
    "# Convert the seed string's characters to ids.\n",
    "input_eval = [char2idx[ch] for ch in start_string]\n",
    "# tf.expand_dims prepends a dimension because the model expects a 2-D input:\n",
    "# a [1, None] matrix (as specified when the model was built).\n",
    "input_eval = tf.expand_dims(input_eval, 0)\n",
    "pred_data=model(input_eval)\n",
    "print(pred_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(64, 100, 65)\n",
      "tf.Tensor(\n",
      "[[[-0.15907884  0.05934067 -0.35366526 ...  0.12091524 -0.09147462\n",
      "    0.4358253 ]\n",
      "  [-0.05790326 -0.40916786  0.27189803 ... -0.2619867   0.2677713\n",
      "    0.05334934]\n",
      "  [-0.04561081  0.02039014 -0.24163127 ... -0.19094382  0.15602714\n",
      "    0.376827  ]\n",
      "  ...\n",
      "  [-0.12340189  0.20502858 -0.05556625 ...  0.13593827  0.04552092\n",
      "    0.00970295]\n",
      "  [-0.02586978  0.37635273 -0.07015497 ...  0.15208712 -0.1154373\n",
      "    0.02528143]\n",
      "  [ 0.07139814 -0.3767841  -0.23202491 ...  0.04688855 -0.0009187\n",
      "   -0.03393807]]\n",
      "\n",
      " [[ 0.02263551 -0.3452119   0.11007877 ... -0.02145951 -0.05537006\n",
      "    0.24492201]\n",
      "  [ 0.01692788 -0.26362032  0.32129034 ... -0.07081421  0.19142875\n",
      "    0.60429585]\n",
      "  [ 0.02687026 -0.19645339  0.08335888 ... -0.00478393  0.4116402\n",
      "   -0.1801524 ]\n",
      "  ...\n",
      "  [ 0.16854075  0.08873902  0.08166758 ... -0.45012632 -0.05288659\n",
      "    0.09841383]\n",
      "  [ 0.11510189  0.10690777 -0.15515053 ...  0.42847517 -0.1245665\n",
      "    0.15683392]\n",
      "  [-0.13634841 -0.41528556  0.14921828 ... -0.2470079  -0.04747797\n",
      "   -0.17404434]]\n",
      "\n",
      " [[ 0.20098111  0.0971952  -0.17131078 ... -0.17353582  0.23881276\n",
      "    0.18443373]\n",
      "  [ 0.03630373  0.05930914 -0.14864865 ...  0.14223294 -0.02277375\n",
      "   -0.31350046]\n",
      "  [-0.10853182  0.3193152  -0.42212117 ... -0.21642184 -0.06386869\n",
      "   -0.1322142 ]\n",
      "  ...\n",
      "  [ 0.07756555  0.00065109 -0.2520966  ...  0.4371992  -0.10923851\n",
      "   -0.38642555]\n",
      "  [-0.23805544  0.04638606 -0.05531638 ... -0.02296939  0.03753167\n",
      "    0.06496816]\n",
      "  [ 0.13943793 -0.12311132  0.37859198 ... -0.13926992  0.1791926\n",
      "    0.44092107]]\n",
      "\n",
      " ...\n",
      "\n",
      " [[ 0.10512359  0.14557481 -0.30408582 ...  0.12941174  0.23001671\n",
      "    0.08461707]\n",
      "  [-0.00506612 -0.00106728 -0.17108294 ... -0.41177705 -0.12523511\n",
      "   -0.14533111]\n",
      "  [-0.03491917 -0.19073267 -0.1117237  ...  0.09725921 -0.00825649\n",
      "   -0.21510872]\n",
      "  ...\n",
      "  [ 0.02243456 -0.26367837 -0.0545682  ... -0.09700137  0.27000788\n",
      "    0.20371255]\n",
      "  [-0.05425185 -0.15236905  0.03916599 ... -0.23270833 -0.11345489\n",
      "   -0.0226634 ]\n",
      "  [ 0.04137148  0.02847419 -0.22375672 ... -0.08732872  0.07080723\n",
      "    0.19233136]]\n",
      "\n",
      " [[-0.06322314  0.01641092 -0.35054013 ... -0.14737254  0.10039818\n",
      "   -0.06733342]\n",
      "  [ 0.12103817 -0.04258583 -0.300988   ...  0.2133859  -0.10981322\n",
      "    0.11939503]\n",
      "  [-0.1636732   0.15631798 -0.11759447 ... -0.12457443 -0.18808115\n",
      "    0.03608835]\n",
      "  ...\n",
      "  [-0.06876349 -0.1296512   0.08586322 ...  0.30073297  0.01976123\n",
      "    0.19248523]\n",
      "  [ 0.04618544 -0.09374399  0.02377518 ... -0.5380454   0.45875528\n",
      "   -0.02623977]\n",
      "  [-0.01932229  0.42642537 -0.30419624 ...  0.44664502 -0.29847172\n",
      "    0.18099767]]\n",
      "\n",
      " [[-0.1227618   0.09733544 -0.19500968 ...  0.00190641 -0.15202948\n",
      "   -0.13152681]\n",
      "  [ 0.09867359 -0.03621556  0.03037604 ... -0.09891603  0.20481566\n",
      "    0.20355894]\n",
      "  [-0.17392012  0.38108867 -0.13005656 ... -0.10455174  0.056227\n",
      "    0.23635097]\n",
      "  ...\n",
      "  [ 0.03369423  0.09533186 -0.01984021 ...  0.11267694  0.08107851\n",
      "    0.28574365]\n",
      "  [ 0.04310169  0.18075716 -0.30980098 ... -0.07666744  0.11643103\n",
      "    0.07972808]\n",
      "  [-0.04361253 -0.2049349  -0.15696603 ...  0.15348363 -0.03907786\n",
      "   -0.19292511]]], shape=(64, 100, 65), dtype=float32)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'\\n分析输出结果\\n输出的结果是（64，100，65）\\n64表示输入的batch_size，输入多少，输出也多少，相当于一次性处理64句话\\n100 表示每句话多少个char，上面模型定义时，指定   return_sequences = True，每训练一句话中的一个char，都会把整句话的模型处理结果返回出来，\\n输出整个序列，而不是只输出一句话里面的最后一个char的output \\n65 表示每个字母对应的Dense的长度，长度为 embedding_dim，即模型输出层的units个数\\n\\n输入模型数据格式 [batch_size,seq_length] ([64,100])，\\n经过embedding层 [batch_size,seq_length,embedding_dim]  ([64,100,256])\\n经过Simple_RNN [batch_size,seq_length,RNN_units]  ([64,100,1024])\\n经过Dense层 [batch_size,seq_length,vocab_size]  ([64,100,65])\\n \\n\\n'"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\n",
    "\n",
    "# 将一份数据送入模型中 进行预测\n",
    "# 模型其实就是一种运算方式，并随着其中参数的变化而变化，\n",
    "# 通过模型对象直接以函数的形式调用\n",
    "\n",
    "# 这里为什么要使用for，就是为了取出seq_dataset.take(1)中的input数据\n",
    "for input_example_batch, target_example_batch in seq_dataset.take(1):\n",
    "#     example_batch_predictions = model(input_example_batch[0])\n",
    "    example_batch_predictions=model(input_example_batch)\n",
    "    print(example_batch_predictions.shape)\n",
    "    print(example_batch_predictions)\n",
    "    \n",
    "'''\n",
    "分析输出结果\n",
    "输出的结果是（64，100，65）\n",
    "64表示输入的batch_size，输入多少，输出也多少，相当于一次性处理64句话\n",
    "100 表示每句话多少个char，上面模型定义时，指定   return_sequences = True，每训练一句话中的一个char，都会把整句话的模型处理结果返回出来，\n",
    "输出整个序列，而不是只输出一句话里面的最后一个char的output \n",
    "65 表示词表大小 vocab_size，即模型输出层（Dense）的units个数\n",
    "\n",
    "输入模型数据格式 [batch_size,seq_length] ([64,100])，\n",
    "经过embedding层 [batch_size,seq_length,embedding_dim]  ([64,100,256]) 这一层将每个字符id映射为一个256维的稠密向量\n",
    "这一层的输出维度是256\n",
    "\n",
    "经过Simple_RNN [batch_size,seq_length,RNN_units]  ([64,100,1024])\n",
    "经过Dense层 [batch_size,seq_length,vocab_size]  ([64,100,65])\n",
    " \n",
    "\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(100, 1)\n",
      "tf.Tensor(\n",
      "[41 12 37 64 33 47  1  9 55 45 22  2 60  1  5  5 49  6 16 36 62 24 13 41\n",
      " 54 56 41 54 49  4  4 44 48 13 19  0  8 13 21 23 60 49 41 49 21  3 22  7\n",
      " 23  4 63 31 50 11 57 38 31 50 42  9  9 20 43 58 42 40 23 23 19 49 27 53\n",
      " 20 21 49 12  2 62 25 32 28 44 34 58 19 16 46 56  8 57 25 51 32  6 22 22\n",
      " 16 47 42 52], shape=(100,), dtype=int64)\n"
     ]
    }
   ],
   "source": [
    "# random sampling. 按概率分布从输出结果中随机采样一个字母作为预测结果\n",
    "# greedy. 取概率最大的结果作为预测结果\n",
    "\n",
    "#logits，就是作分类任务时，经过最后的softmax之前的数据\n",
    "# 使用tf.random.categorical，来随机获取sample\n",
    "\n",
    "\n",
    "# 这个函数的作用是，从二维矩阵中，随机的获取每一行的的指定数量份数据\n",
    "# 其实说是随机，但也是存在规律的，按照指定的规则获取数据\n",
    "sample_indices = tf.random.categorical(\n",
    "    # example_batch_predictions[0]就表示一句话的数据 [100,65]\n",
    "    logits = example_batch_predictions[0],\n",
    "    # num_samples，获取的数目，指定为随机获取一个\n",
    "    # 数据维度变化 (100, 65) -> (100, 1)\n",
    "    num_samples = 1)\n",
    "\n",
    "print(sample_indices.shape)\n",
    "\n",
    "\n",
    "\n",
    "# 减少维度，\n",
    "# 将一个shape=(100,1)的矩阵，降维为长度为100的向量\n",
    "sample_indices = tf.squeeze(sample_indices, axis = -1)\n",
    "print(sample_indices)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(\n",
      "[[1.00000e+00 2.00000e+00 3.00000e+00 1.00000e+03 1.23123e+05]\n",
      " [4.00000e+00 5.00000e+00 6.00000e+00 1.00000e+04 1.23123e+05]], shape=(2, 5), dtype=float64)\n",
      "tf.Tensor(\n",
      "[[4]\n",
      " [4]], shape=(2, 1), dtype=int64)\n"
     ]
    }
   ],
   "source": [
    "data=tf.constant([\n",
    "    [1,2,3,1000,123123],\n",
    "    [4,5,6,10000,123123]\n",
    "],dtype=tf.float64)\n",
    "print(data)\n",
    "samples = tf.random.categorical(data, 1)\n",
    "print(samples)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Input:  \"n\\nCry fie upon my grave!\\n\\nLEONTES:\\nI ne'er heard yet\\nThat any of these bolder vices wanted\\nLess impu\"\n",
      "\n",
      "Output:  \"\\nCry fie upon my grave!\\n\\nLEONTES:\\nI ne'er heard yet\\nThat any of these bolder vices wanted\\nLess impud\"\n",
      "\n",
      "Predictions:  \"c?YzUi 3qgJ!v ''k,DXxLAcprcpk&&fjAG\\n.AIKvkckI$J-K&ySl;sZSld33HetdbKKGkOoHIk?!xMTPfVtGDhr.sMmT,JJDidn\"\n"
     ]
    }
   ],
   "source": [
    "# 显示input数据 和 output数据 以及模型的输出结果\n",
    "# 由于模型尚未训练，返回的结果是乱的\n",
    "print(\"Input: \", repr(\"\".join(idx2char[input_example_batch[0]])))\n",
    "print()\n",
    "print(\"Output: \", repr(\"\".join(idx2char[target_example_batch[0]])))\n",
    "print()\n",
    "print(\"Predictions: \", repr(\"\".join(idx2char[sample_indices])))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(64, 100)\n",
      "4.1913624\n"
     ]
    }
   ],
   "source": [
    "# 自定义损失函数，\n",
    "def loss(labels, logits):\n",
    "    # \n",
    "    return keras.losses.sparse_categorical_crossentropy(\n",
    "        labels, logits, from_logits=True)\n",
    "\n",
    "model.compile(optimizer = 'adam', loss = loss)\n",
    "example_loss = loss(target_example_batch, example_batch_predictions)\n",
    "print(example_loss.shape)\n",
    "print(example_loss.numpy().mean())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/100\n",
      "172/172 [==============================] - 44s 258ms/step - loss: 2.6715\n",
      "Epoch 2/100\n",
      "172/172 [==============================] - 42s 247ms/step - loss: 2.0199\n",
      "Epoch 3/100\n",
      "172/172 [==============================] - 44s 253ms/step - loss: 1.8145\n",
      "Epoch 4/100\n",
      "172/172 [==============================] - 43s 249ms/step - loss: 1.6819\n",
      "Epoch 5/100\n",
      "172/172 [==============================] - 43s 252ms/step - loss: 1.5952\n",
      "Epoch 6/100\n",
      "172/172 [==============================] - 42s 243ms/step - loss: 1.5344\n",
      "Epoch 7/100\n",
      "172/172 [==============================] - 42s 242ms/step - loss: 1.4893\n",
      "Epoch 8/100\n",
      "172/172 [==============================] - 42s 244ms/step - loss: 1.4533\n",
      "Epoch 9/100\n",
      "172/172 [==============================] - 42s 242ms/step - loss: 1.4256\n",
      "Epoch 10/100\n",
      "172/172 [==============================] - 44s 255ms/step - loss: 1.3982\n",
      "Epoch 11/100\n",
      "172/172 [==============================] - 44s 255ms/step - loss: 1.3769\n",
      "Epoch 12/100\n",
      "172/172 [==============================] - 43s 250ms/step - loss: 1.3606\n",
      "Epoch 13/100\n",
      "172/172 [==============================] - 42s 246ms/step - loss: 1.3445\n",
      "Epoch 14/100\n",
      "172/172 [==============================] - 46s 266ms/step - loss: 1.3285\n",
      "Epoch 15/100\n",
      "172/172 [==============================] - 46s 265ms/step - loss: 1.3134\n",
      "Epoch 16/100\n",
      "172/172 [==============================] - 43s 249ms/step - loss: 1.2971\n",
      "Epoch 17/100\n",
      "172/172 [==============================] - 42s 245ms/step - loss: 1.2858\n",
      "Epoch 18/100\n",
      "172/172 [==============================] - 43s 252ms/step - loss: 1.2795\n",
      "Epoch 19/100\n",
      "172/172 [==============================] - 43s 248ms/step - loss: 1.2717\n",
      "Epoch 20/100\n",
      "172/172 [==============================] - 43s 249ms/step - loss: 1.2661\n",
      "Epoch 21/100\n",
      "172/172 [==============================] - 42s 247ms/step - loss: 1.2664\n",
      "Epoch 22/100\n",
      "172/172 [==============================] - 42s 244ms/step - loss: 1.2622\n",
      "Epoch 23/100\n",
      "172/172 [==============================] - 44s 257ms/step - loss: 1.2488\n",
      "Epoch 24/100\n",
      "172/172 [==============================] - 44s 255ms/step - loss: 1.2376\n",
      "Epoch 25/100\n",
      "172/172 [==============================] - 46s 267ms/step - loss: 1.2314\n",
      "Epoch 26/100\n",
      "172/172 [==============================] - 44s 257ms/step - loss: 1.2292\n",
      "Epoch 27/100\n",
      "172/172 [==============================] - 47s 275ms/step - loss: 1.2240\n",
      "Epoch 28/100\n",
      "172/172 [==============================] - 47s 271ms/step - loss: 1.2187\n",
      "Epoch 29/100\n",
      "172/172 [==============================] - 45s 260ms/step - loss: 1.2170\n",
      "Epoch 30/100\n",
      "172/172 [==============================] - 43s 250ms/step - loss: 1.2153\n",
      "Epoch 31/100\n",
      "172/172 [==============================] - 44s 256ms/step - loss: 1.2078\n",
      "Epoch 32/100\n",
      "172/172 [==============================] - 43s 252ms/step - loss: 1.1992\n",
      "Epoch 33/100\n",
      "172/172 [==============================] - 44s 256ms/step - loss: 1.1921\n",
      "Epoch 34/100\n",
      "172/172 [==============================] - 46s 265ms/step - loss: 1.1871\n",
      "Epoch 35/100\n",
      "172/172 [==============================] - 47s 273ms/step - loss: 1.1852\n",
      "Epoch 36/100\n",
      "172/172 [==============================] - 46s 270ms/step - loss: 1.1853\n",
      "Epoch 37/100\n",
      "172/172 [==============================] - 45s 260ms/step - loss: 1.1817\n",
      "Epoch 38/100\n",
      "172/172 [==============================] - 45s 259ms/step - loss: 1.1756\n",
      "Epoch 39/100\n",
      "172/172 [==============================] - 45s 262ms/step - loss: 1.1720\n",
      "Epoch 40/100\n",
      "172/172 [==============================] - 44s 256ms/step - loss: 1.1726\n",
      "Epoch 41/100\n",
      "172/172 [==============================] - 45s 264ms/step - loss: 1.1755\n",
      "Epoch 42/100\n",
      "172/172 [==============================] - 44s 256ms/step - loss: 1.1742\n",
      "Epoch 43/100\n",
      "172/172 [==============================] - 45s 259ms/step - loss: 1.1753\n",
      "Epoch 44/100\n",
      "172/172 [==============================] - 44s 255ms/step - loss: 1.1738\n",
      "Epoch 45/100\n",
      "172/172 [==============================] - 45s 262ms/step - loss: 1.1729\n",
      "Epoch 46/100\n",
      "172/172 [==============================] - 46s 266ms/step - loss: 1.1669\n",
      "Epoch 47/100\n",
      "172/172 [==============================] - 46s 270ms/step - loss: 1.1588\n",
      "Epoch 48/100\n",
      "172/172 [==============================] - 45s 264ms/step - loss: 1.1575\n",
      "Epoch 49/100\n",
      "172/172 [==============================] - 45s 260ms/step - loss: 1.1522\n",
      "Epoch 50/100\n",
      "172/172 [==============================] - 46s 266ms/step - loss: 1.1487\n",
      "Epoch 51/100\n",
      "172/172 [==============================] - 46s 269ms/step - loss: 1.1487\n",
      "Epoch 52/100\n",
      "172/172 [==============================] - 45s 263ms/step - loss: 1.1483\n",
      "Epoch 53/100\n",
      "172/172 [==============================] - 44s 255ms/step - loss: 1.1493\n",
      "Epoch 54/100\n",
      "172/172 [==============================] - 44s 256ms/step - loss: 1.1504\n",
      "Epoch 55/100\n",
      "172/172 [==============================] - 44s 253ms/step - loss: 1.1515\n",
      "Epoch 56/100\n",
      "172/172 [==============================] - 45s 260ms/step - loss: 1.1475\n",
      "Epoch 57/100\n",
      "172/172 [==============================] - 44s 256ms/step - loss: 1.1474\n",
      "Epoch 58/100\n",
      "172/172 [==============================] - 45s 262ms/step - loss: 1.1446\n",
      "Epoch 59/100\n",
      "172/172 [==============================] - 45s 262ms/step - loss: 1.1424\n",
      "Epoch 60/100\n",
      "172/172 [==============================] - 46s 269ms/step - loss: 1.1441\n",
      "Epoch 61/100\n",
      "172/172 [==============================] - 46s 267ms/step - loss: 1.1412\n",
      "Epoch 62/100\n",
      "172/172 [==============================] - 46s 270ms/step - loss: 1.1413\n",
      "Epoch 63/100\n",
      "172/172 [==============================] - 46s 265ms/step - loss: 1.1443\n",
      "Epoch 64/100\n",
      "172/172 [==============================] - 46s 266ms/step - loss: 1.1448\n",
      "Epoch 65/100\n",
      "172/172 [==============================] - 46s 265ms/step - loss: 1.1450\n",
      "Epoch 66/100\n",
      "172/172 [==============================] - 44s 258ms/step - loss: 1.1474\n",
      "Epoch 67/100\n",
      "172/172 [==============================] - 45s 264ms/step - loss: 1.1474\n",
      "Epoch 68/100\n",
      "172/172 [==============================] - 46s 267ms/step - loss: 1.1486\n",
      "Epoch 69/100\n",
      "172/172 [==============================] - 46s 268ms/step - loss: 1.1476\n",
      "Epoch 70/100\n",
      "172/172 [==============================] - 45s 264ms/step - loss: 1.1495\n",
      "Epoch 71/100\n",
      "172/172 [==============================] - 44s 258ms/step - loss: 1.1506\n",
      "Epoch 72/100\n",
      "172/172 [==============================] - 43s 248ms/step - loss: 1.1544\n",
      "Epoch 73/100\n",
      "172/172 [==============================] - 46s 268ms/step - loss: 1.1541\n",
      "Epoch 74/100\n",
      "172/172 [==============================] - 45s 262ms/step - loss: 1.1587\n",
      "Epoch 75/100\n",
      "172/172 [==============================] - 45s 260ms/step - loss: 1.1578\n",
      "Epoch 76/100\n",
      "172/172 [==============================] - 46s 268ms/step - loss: 1.1573\n",
      "Epoch 77/100\n",
      "172/172 [==============================] - 45s 263ms/step - loss: 1.1596\n",
      "Epoch 78/100\n",
      "172/172 [==============================] - 47s 270ms/step - loss: 1.1600\n",
      "Epoch 79/100\n",
      "172/172 [==============================] - 45s 264ms/step - loss: 1.1677\n",
      "Epoch 80/100\n",
      "172/172 [==============================] - 45s 263ms/step - loss: 1.1643\n",
      "Epoch 81/100\n",
      "172/172 [==============================] - 46s 266ms/step - loss: 1.1679\n",
      "Epoch 82/100\n",
      "172/172 [==============================] - 46s 267ms/step - loss: 1.1707\n",
      "Epoch 83/100\n",
      "172/172 [==============================] - 45s 260ms/step - loss: 1.1684\n",
      "Epoch 84/100\n",
      "172/172 [==============================] - 45s 264ms/step - loss: 1.1718\n",
      "Epoch 85/100\n",
      "172/172 [==============================] - 44s 256ms/step - loss: 1.1779\n",
      "Epoch 86/100\n",
      "172/172 [==============================] - 44s 257ms/step - loss: 1.1780\n",
      "Epoch 87/100\n",
      "172/172 [==============================] - 46s 265ms/step - loss: 1.1796\n",
      "Epoch 88/100\n",
      "172/172 [==============================] - 46s 270ms/step - loss: 1.1776\n",
      "Epoch 89/100\n",
      "172/172 [==============================] - 46s 268ms/step - loss: 1.1848\n",
      "Epoch 90/100\n",
      "172/172 [==============================] - 47s 271ms/step - loss: 1.1847\n",
      "Epoch 91/100\n",
      "172/172 [==============================] - 46s 265ms/step - loss: 1.1857\n",
      "Epoch 92/100\n",
      "172/172 [==============================] - 46s 266ms/step - loss: 1.1885\n",
      "Epoch 93/100\n",
      "172/172 [==============================] - 45s 263ms/step - loss: 1.1917\n",
      "Epoch 94/100\n",
      "172/172 [==============================] - 44s 258ms/step - loss: 1.1908\n",
      "Epoch 95/100\n",
      "172/172 [==============================] - 43s 252ms/step - loss: 1.1998\n",
      "Epoch 96/100\n",
      "172/172 [==============================] - 46s 270ms/step - loss: 1.2035\n",
      "Epoch 97/100\n",
      "172/172 [==============================] - 46s 267ms/step - loss: 1.2057\n",
      "Epoch 98/100\n",
      "172/172 [==============================] - 44s 258ms/step - loss: 1.2016\n",
      "Epoch 99/100\n",
      "172/172 [==============================] - 44s 256ms/step - loss: 1.2069\n",
      "Epoch 100/100\n",
      "172/172 [==============================] - 46s 267ms/step - loss: 1.2162\n"
     ]
    }
   ],
   "source": [
    "output_dir = \"./text_generation_checkpoints\"\n",
    "if not os.path.exists(output_dir):\n",
    "    os.mkdir(output_dir)\n",
    "checkpoint_prefix = os.path.join(output_dir, 'ckpt_{epoch}')\n",
    "# 定义模型的callback\n",
    "checkpoint_callback = keras.callbacks.ModelCheckpoint(\n",
    "    filepath = checkpoint_prefix,\n",
    "    ## 指定只保存模型的训练参数，没有保存模型结构\n",
    "    ## 因此需要手动初始化模型结构（创建模型对象），并使用load_weights()方法，加载参数\n",
    "    save_weights_only = True)\n",
    "\n",
    "epochs = 100\n",
    "history = model.fit(seq_dataset, epochs = epochs,\n",
    "                    callbacks = [checkpoint_callback])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'./text_generation_checkpoints/ckpt_100'"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tf.train.latest_checkpoint(output_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential_1\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "embedding_1 (Embedding)      (1, None, 256)            16640     \n",
      "_________________________________________________________________\n",
      "simple_rnn_1 (SimpleRNN)     (1, None, 1024)           1311744   \n",
      "_________________________________________________________________\n",
      "dense_1 (Dense)              (1, None, 65)             66625     \n",
      "=================================================================\n",
      "Total params: 1,395,009\n",
      "Trainable params: 1,395,009\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "# 创建模型\n",
    "model2 = build_model(vocab_size,\n",
    "                     embedding_dim,\n",
    "                     rnn_units,\n",
    "                     batch_size = 1)\n",
    "# 从之前训练的模型中，选出效果最好的模型阶段，将其模型参数载入\n",
    "# 从 TF checkpoint 文件（由 save_weights 以前缀路径创建）中加载权重。默认情况下，模型的结构应该是不变的。\n",
    "model2.load_weights(tf.train.latest_checkpoint(output_dir))\n",
    "\n",
    "# 调用模型的build方法，设置模型的输入size\n",
    "# 1表示一个样本数，None表示变长\n",
    "model2.build(tf.TensorShape([1,None]))\n",
    "\n",
    "\n",
    "# start ch sequence A,      起始字符串 A\n",
    "# A -> model -> b           把起始字符串输入模型，得到预测结果b\n",
    "# A.append(b) -> B          把b append 到A后面，拼接之后的结果为B\n",
    "# B(Ab) -> model -> c       把B 输入模型 ，得到预测结果c\n",
    "# B.append(c) -> C          B+c 得到 C\n",
    "# C(Abc) -> model -> ...\n",
    "model2.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "All: his face beauty's slanders. Well, and some unto the storm'd fear; then dreams.\n",
      "\n",
      "CORIOLANUS:\n",
      "I will be made;\n",
      "Rike unwilling I will purposed-head:\n",
      "And I will stir the man\n",
      "And all the from your parciur flies by himself. Well;\n",
      "I am the widows fair? s, Auch\n",
      "An utterance on many lines, I'ld s, and thou never ed.\n",
      "Some happy stride, and supll you want fairest twenty soled no geedle moise his own alone! I am toUS:\n",
      "Hair undood oppasies? Or Your issues sir,\n",
      "That lies alenty; and therefore from your wealth.\n",
      "\n",
      "ROMEO:\n",
      "As I beleavent an England.\n",
      "\n",
      "TYBALT:\n",
      "My love: be of thee the flesh long-prounds. Grace now, gentle more than Jove, my gracious souls,\n",
      "Brief, his father,\n",
      "As, by some instery assurakery live,\n",
      "And finderape.\n",
      "\n",
      "CARESLENT:\n",
      "My lord of France?\n",
      "Why, what I had sees to Rome, name myself; it wass'd, being all eet and being sudd it yet?\n",
      "\n",
      "RAMILANUS:\n",
      "Now, brother Clarence.\n",
      "Of all run arms\n",
      "By Bianciau-house your grandsistail.\n",
      "\n",
      "PETRUCHIO:\n",
      "Come, content ines, it is here?\n",
      "This day, my lord, will you thing\n"
     ]
    }
   ],
   "source": [
    "# 调用函数，预测数据\n",
    "# num_generate 表示生成字符串数目\n",
    "def generate_text(model, start_string, num_generate = 1000):\n",
    "    # 将起始字符串 start_string 转换为 id 形式\n",
    "    # 使用循环表达式\n",
    "    input_eval = [char2idx[ch] for ch in start_string]\n",
    "    # 使用tf.expand_dims ，对数据进行扩容，因为模型接受的数据格式是 二维矩阵 输入的数据是个[1,None]的矩阵（在模型build时指定的）\n",
    "    # 进行维度扩展，在当前维度前，再加一个维度\n",
    "    input_eval = tf.expand_dims(input_eval, 0)\n",
    "    \n",
    "    # 最终生成的数据\n",
    "    text_generated = []\n",
    "    \n",
    "    # 此时是使用实际数据来进行预测过程，不是训练。\n",
    "    # 需要将模型原先的状态值给reset掉\n",
    "    model.reset_states()\n",
    "   \n",
    "    # 因为一共生成1000个字符，循环1000次\n",
    "    for _ in range(num_generate):\n",
    "        # 1. model inference -> predictions\n",
    "        # 2. sample -> ch -> text_generated.\n",
    "        # 3. update input_eval\n",
    "        \n",
    "        \n",
    "        # 输入一句话，产生预测结果\n",
    "        # shape  predictions : [batch_size, input_eval_len, vocab_size]\n",
    "        predictions = model(input_eval)\n",
    "        \n",
    "        \n",
    "        # 将预测结果（模型返回数据）进行降维，\n",
    "        # predictions : [input_eval_len, vocab_size]\n",
    "        predictions = tf.squeeze(predictions, 0)\n",
    "        \n",
    "        \n",
    "        # 随机的选出 \n",
    "        # predicted_ids: [input_eval_len, 1]\n",
    "        # a b c -> b c d\n",
    "        predicted_id = tf.random.categorical(\n",
    "            predictions, num_samples = 1)[-1, 0].numpy()\n",
    "        \n",
    "        # 一个字符串预测出一个char\n",
    "        text_generated.append(idx2char[predicted_id])\n",
    "        # s, x -> rnn -> s', y\n",
    "        input_eval = tf.expand_dims([predicted_id], 0)\n",
    "    return start_string + ''.join(text_generated)\n",
    "\n",
    "new_text = generate_text(model2, \"All: \")\n",
    "print(new_text)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
