{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "原始链接：https://keras.io/examples/generative/text_generation_with_miniature_gpt/"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T10:28:23.418631Z",
     "start_time": "2020-06-10T10:28:22.556225Z"
    }
   },
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow import keras\n",
    "from tensorflow.keras import layers\n",
    "from tensorflow.keras.layers.experimental.preprocessing import TextVectorization\n",
    "import numpy as np\n",
    "import os\n",
    "import re\n",
    "import string\n",
    "import random"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 自注意力层\n",
    "> TODO：填充的掩码，如何处理？"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T11:12:50.424044Z",
     "start_time": "2020-06-10T11:12:50.411683Z"
    }
   },
   "outputs": [],
   "source": [
     "class MultiHeadAttention(layers.Layer):\n",
     "    \"\"\"Multi-head self-attention with a causal (look-ahead) mask.\n",
     "\n",
     "    The embedding is split into `num_heads` heads of size\n",
     "    `embed_dim // num_heads`; each head runs scaled dot-product attention\n",
     "    under a lower-triangular mask, and the heads are recombined by a\n",
     "    final dense projection.\n",
     "    \"\"\"\n",
     "    def __init__(self, embed_dim, num_heads=8):\n",
     "        super(MultiHeadAttention, self).__init__()\n",
     "        self.embed_dim = embed_dim\n",
     "        self.num_heads = num_heads\n",
     "\n",
     "        if embed_dim % num_heads != 0:\n",
     "            raise ValueError(\n",
     "                f\"embedding dimension = {embed_dim} should be divisible by number of heads = \"\n",
     "                f\"{num_heads}\")\n",
     "        # Per-head dimensionality.\n",
     "        self.projection_dim = embed_dim // num_heads\n",
     "        self.query_dense = layers.Dense(embed_dim)\n",
     "        self.key_dense = layers.Dense(embed_dim)\n",
     "        self.value_dense = layers.Dense(embed_dim)\n",
     "        self.combined_heads = layers.Dense(embed_dim)\n",
     "\n",
     "    @staticmethod\n",
     "    def casual_attention_mask(n_dest, n_src, dtype):\n",
     "        \"\"\"Lower-triangular causal mask (NOTE: name is a typo of 'causal').\n",
     "\n",
     "        n_dest: target sequence length\n",
     "        n_src: source sequence length\n",
     "        return: [n_dest, n_src]; 1 where source position j may be attended\n",
     "        to from target position i (i >= j - (n_src - n_dest)), else 0.\n",
     "        \"\"\"\n",
     "\n",
     "        i = tf.range(n_dest)[:, None]\n",
     "        j = tf.range(n_src)\n",
     "        m = i >= j - n_src + n_dest\n",
     "        return tf.cast(m, dtype)\n",
     "\n",
     "    def attention(self, query, key, value):\n",
     "        \"\"\"Scaled dot-product attention with the causal mask applied.\n",
     "\n",
     "        query/key/value: (batch_size, num_heads, seq_len, projection_dim)\n",
     "        return: (output, weights)\n",
     "        \"\"\"\n",
     "\n",
     "        # (batch_size, num_heads, seq_len, seq_len)\n",
     "        score = tf.matmul(query, key, transpose_b=True)\n",
     "        dim_key = tf.cast(tf.shape(key)[-1], tf.float32)\n",
     "        scaled_score = score / tf.math.sqrt(dim_key)\n",
     "\n",
     "        # Prevent a token from attending to tokens that come after it.\n",
     "        shape = tf.shape(scaled_score)\n",
     "        dim_dest, dim_src = shape[2], shape[3]\n",
     "        attention_mask = self.casual_attention_mask(\n",
     "            dim_dest,\n",
     "            dim_src,\n",
     "            scaled_score.dtype,\n",
     "        )\n",
     "        attention_mask = tf.reshape(attention_mask, [1, 1, dim_dest, dim_src])\n",
     "        # Masked positions get a large negative value (-1e4) so that\n",
     "        # softmax drives their weight to ~0.\n",
     "        scaled_score = scaled_score * attention_mask - 1e4 * (1 -\n",
     "                                                              attention_mask)\n",
     "\n",
     "        # (batch_size, num_heads, seq_len, seq_len)\n",
     "        weights = tf.nn.softmax(scaled_score, axis=-1)\n",
     "\n",
     "        # (batch_size, num_heads, seq_len, projection_dim)\n",
     "        output = tf.matmul(weights, value)\n",
     "        return output, weights\n",
     "\n",
     "    def separate_heads(self, x, batch_size):\n",
     "        # (batch, seq_len, embed_dim) -> (batch, num_heads, seq_len, projection_dim)\n",
     "        x = tf.reshape(\n",
     "            x,\n",
     "            (batch_size, -1, self.num_heads, self.projection_dim),\n",
     "        )\n",
     "        return tf.transpose(x, perm=[0, 2, 1, 3])\n",
     "\n",
     "    def call(self, inputs, **kwargs):\n",
     "        # inputs: (batch_size, seq_len, embedding_size)\n",
     "        batch_size = tf.shape(inputs)[0]\n",
     "\n",
     "        # (batch_size, seq_len, embed_dim)\n",
     "        query = self.query_dense(inputs)\n",
     "        key = self.key_dense(inputs)\n",
     "        value = self.value_dense(inputs)\n",
     "\n",
     "        # (batch_size, num_heads, seq_len, projection_dim)\n",
     "        query = self.separate_heads(query, batch_size)\n",
     "        key = self.separate_heads(key, batch_size)\n",
     "        value = self.separate_heads(value, batch_size)\n",
     "\n",
     "        # (batch_size, num_heads, seq_len, projection_dim)\n",
     "        attention, weights = self.attention(query, key, value)\n",
     "        attention = tf.transpose(attention, perm=[0, 2, 1, 3])\n",
     "        concat_attention = tf.reshape(attention,\n",
     "                                      (batch_size, -1, self.embed_dim))\n",
     "        # (batch_size, seq_len, embed_size)\n",
     "        output = self.combined_heads(concat_attention)\n",
     "        return output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T11:09:17.835394Z",
     "start_time": "2020-06-10T11:09:17.723810Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor: shape=(4, 4), dtype=float32, numpy=\n",
       "array([[1., 0., 0., 0.],\n",
       "       [1., 1., 0., 0.],\n",
       "       [1., 1., 1., 0.],\n",
       "       [1., 1., 1., 1.]], dtype=float32)>"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 创建掩码，遮掩 当前标记 之后的所有标记\n",
    "n_dest = 4\n",
    "n_src = 4\n",
    "\n",
    "\n",
    "def create_attention_mask(n_dest, n_src, dtype=tf.float32):\n",
    "    i = tf.range(n_dest)[:, None]\n",
    "    j = tf.range(n_src)\n",
    "    m = i >= j - n_src + n_dest\n",
    "    return tf.cast(m, dtype)\n",
    "\n",
    "\n",
    "create_attention_mask(n_dest, n_src)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T11:13:12.947266Z",
     "start_time": "2020-06-10T11:13:12.833523Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Before mask:\n",
      " tf.Tensor(\n",
      "[[[[0.10547268 0.7463316  0.45979226]\n",
      "   [0.95134735 0.31935334 0.34476125]\n",
      "   [0.6548263  0.9963918  0.38449323]]]], shape=(1, 1, 3, 3), dtype=float32)\n",
      "After mask:\n",
      " tf.Tensor(\n",
      "[[[[ 1.05472684e-01 -1.00000000e+04 -1.00000000e+04]\n",
      "   [ 9.51347351e-01  3.19353342e-01 -1.00000000e+04]\n",
      "   [ 6.54826283e-01  9.96391773e-01  3.84493232e-01]]]], shape=(1, 1, 3, 3), dtype=float32)\n"
     ]
    }
   ],
   "source": [
     "# Demonstrate the masking step.\n",
     "# batch_size=1, num_heads=1, seq_len=3\n",
     "scaled_score = tf.random.uniform((1, 1, 3, 3), 0, 1, dtype=tf.float32)\n",
     "print(\"Before mask:\\n\", scaled_score)\n",
     "\n",
     "_, _, dim_dest, dim_src = tf.shape(scaled_score)\n",
     "attention_mask = create_attention_mask(dim_dest, dim_src)\n",
     "\n",
     "# Masked entries become ~-1e4 so they vanish after softmax.\n",
     "scaled_score = scaled_score * attention_mask - 1e4 * (1 - attention_mask)\n",
     "print(\"After mask:\\n\", scaled_score)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T11:13:35.505501Z",
     "start_time": "2020-06-10T11:13:35.490861Z"
    },
    "lines_to_next_cell": 2
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(\n",
      "[[[-0.38252696  0.36132467 -0.13862547 -1.1787404  -0.09665567\n",
      "    0.02887212 -0.34742364  0.69407684  0.4977634   0.11721583]\n",
      "  [-0.3253528   0.27771837 -0.38126877 -1.2979437  -0.3482508\n",
      "    0.20360255 -0.34971687  0.7869941   0.6485061   0.3667486 ]\n",
      "  [-0.29071248  0.1254045  -0.55469835 -1.0486559  -0.58551824\n",
      "    0.3572234  -0.20800653  0.6436509   0.69355655  0.6243084 ]]], shape=(1, 3, 10), dtype=float32)\n"
     ]
    }
   ],
   "source": [
     "# Smoke test: batch_size=1, seq_len=3, embed_size=4\n",
     "inputs = tf.random.uniform((1, 3, 4), 0, 1, dtype=tf.float32)\n",
     "embed_dim = 10\n",
     "num_heads = 5\n",
     "attention = MultiHeadAttention(embed_dim, num_heads)\n",
     "# Output shape: (1, 3, embed_dim).\n",
     "output = attention(inputs)\n",
     "print(output)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Transformer 层"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T11:16:15.613358Z",
     "start_time": "2020-06-10T11:16:15.606281Z"
    }
   },
   "outputs": [],
   "source": [
    "class TransformerBlock(layers.Layer):\n",
    "    def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):\n",
    "        super(TransformerBlock, self).__init__()\n",
    "        self.attn = MultiHeadAttention(embed_dim, num_heads)\n",
    "        \n",
    "        # 前向层\n",
    "        self.ffn = keras.Sequential([\n",
    "            layers.Dense(ff_dim, activation='relu'),\n",
    "            layers.Dense(embed_dim),\n",
    "        ])\n",
    "        \n",
    "        # 正则化层\n",
    "        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)\n",
    "        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)\n",
    "        self.dropout1 = layers.Dropout(rate)\n",
    "        self.dropout2 = layers.Dropout(rate)\n",
    "\n",
    "    def call(self, inputs):  # batch_size,seq_len,embed_size\n",
    "        attention_output = self.attn(inputs)  # batch_size,seq_len,embed_size\n",
    "        attention_output = self.dropout1(attention_output)\n",
    "        out1 = self.layernorm1(attention_output)\n",
    "        ffn_output = self.ffn(out1)  # batch_size,seq_len,embed_size\n",
    "        ffn_output = self.dropout2(ffn_output)  # batch_size,seq_len,embed_size\n",
    "        return self.layernorm2(out1 + ffn_output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T11:16:45.551915Z",
     "start_time": "2020-06-10T11:16:45.416510Z"
    },
    "lines_to_next_cell": 2
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(\n",
      "[[[-0.00332756  1.1323926   1.0909868  -1.5782332   0.22285575\n",
      "    0.91380244  0.5024045  -0.2707299  -0.05485689 -1.9552947 ]\n",
      "  [-0.00368661  1.4767317   0.65466416  0.23449448 -0.6320796\n",
      "    0.9992423  -0.26263362 -0.07430581  0.02879716 -2.421224  ]\n",
      "  [-0.16078229  1.4712099  -0.09554663  1.1715316  -1.1065551\n",
      "    0.80093515 -0.24299382 -0.16937007  0.40726507 -2.0756936 ]]], shape=(1, 3, 10), dtype=float32)\n"
     ]
    }
   ],
   "source": [
     "# Smoke test: run the random inputs through one transformer block.\n",
     "ff_dim = 10\n",
     "transformer = TransformerBlock(embed_dim, num_heads, ff_dim)\n",
     "print(transformer(inputs))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 嵌入层"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T11:22:08.503820Z",
     "start_time": "2020-06-10T11:22:08.496863Z"
    }
   },
   "outputs": [],
   "source": [
     "class TokenAndPositionEmbedding(layers.Layer):\n",
     "    \"\"\"Sum of a learned token embedding and a learned position embedding.\"\"\"\n",
     "    def __init__(self, maxlen, vocab_size, embed_dim):\n",
     "        super(TokenAndPositionEmbedding, self).__init__()\n",
     "        # Token embedding.\n",
     "        self.token_embed = layers.Embedding(\n",
     "            input_dim=vocab_size,\n",
     "            output_dim=embed_dim,\n",
     "        )\n",
     "        # Positional encoding; also a trainable parameter (not sinusoidal).\n",
     "        self.pos_embed = layers.Embedding(\n",
     "            input_dim=maxlen,\n",
     "            output_dim=embed_dim,\n",
     "        )\n",
     "\n",
     "    def call(self, x):\n",
     "        # x: (batch, seq_len) integer token ids.\n",
     "        maxlen = tf.shape(x)[-1]\n",
     "        positions = tf.range(start=0, limit=maxlen, delta=1)\n",
     "        positions = self.pos_embed(positions)\n",
     "        x = self.token_embed(x)\n",
     "        # Positions broadcast over the batch dimension.\n",
     "        return x + positions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T11:22:09.838688Z",
     "start_time": "2020-06-10T11:22:09.721647Z"
    },
    "lines_to_next_cell": 2
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(\n",
      "[[[ 0.00485352  0.04649875  0.01722922 -0.08977088  0.03237002\n",
      "    0.03043242  0.0164837  -0.01169515 -0.03283117  0.07614163]\n",
      "  [ 0.03652454  0.00519327 -0.04325895 -0.03345573 -0.0163762\n",
      "   -0.01602948  0.06325639 -0.02854483  0.00524993 -0.04182397]\n",
      "  [ 0.01462177  0.04132431  0.02875835  0.01675186 -0.01151166\n",
      "    0.05898074  0.05975775  0.08203638  0.03354775 -0.00828075]\n",
      "  [ 0.00346811  0.0666132   0.0304955  -0.08781236  0.04581798\n",
      "    0.00912217 -0.01231779 -0.02989997  0.02603679  0.02221246]\n",
      "  [ 0.0293895  -0.01145194  0.08149004 -0.03042657 -0.02672334\n",
      "    0.04037578 -0.05371469 -0.04414493 -0.03230675  0.04327554]]\n",
      "\n",
      " [[-0.05091096  0.07347768  0.0649236  -0.04824299 -0.02064015\n",
      "    0.00601267  0.00380222 -0.04921037  0.00894145 -0.00561922]\n",
      "  [ 0.01696062  0.01604785  0.01192685 -0.04674814 -0.00258112\n",
      "    0.06363203  0.02451729 -0.05413387  0.07132256 -0.02281509]\n",
      "  [ 0.0071785   0.05307038  0.05278654  0.01842588 -0.04215529\n",
      "    0.04731597  0.00161308  0.00769514  0.04309616 -0.09342918]\n",
      "  [-0.00963625  0.05734295  0.07304855 -0.00774929  0.08302666\n",
      "   -0.03335828  0.00137042 -0.00269059  0.05748196 -0.01081836]\n",
      "  [ 0.04573686  0.02500467  0.08140367  0.03473415  0.05546828\n",
      "   -0.02015295 -0.02831808 -0.01994301 -0.01692468  0.04432578]]], shape=(2, 5, 10), dtype=float32)\n"
     ]
    }
   ],
   "source": [
     "# Smoke test of the combined embedding layer.\n",
     "maxlen = 5\n",
     "vocab_size = 20\n",
     "embed = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)\n",
     "\n",
     "# batch=2, seq_len=5\n",
     "x = tf.constant(np.random.randint(1, 15, (2, maxlen), dtype=np.int32))\n",
     "print(embed(x))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# GPT 模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T11:25:08.103465Z",
     "start_time": "2020-06-10T11:25:08.098321Z"
    }
   },
   "outputs": [],
   "source": [
     "# Model hyperparameters.\n",
     "vocab_size = 20000  # max tokens kept by TextVectorization\n",
     "maxlen = 100        # sequence length fed to the model\n",
     "embed_dim = 256     # embedding dimensionality\n",
     "num_heads = 2       # attention heads\n",
     "ff_dim = 256  # hidden size of the feed-forward sub-layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T11:25:10.971413Z",
     "start_time": "2020-06-10T11:25:10.965088Z"
    }
   },
   "outputs": [],
   "source": [
     "def create_model():\n",
     "    \"\"\"Build and compile the miniature GPT model.\n",
     "\n",
     "    Returns a compiled keras.Model with two outputs: vocabulary logits\n",
     "    and the transformer activations. Only the logits receive a loss --\n",
     "    the second entry of `loss` is None, so the activations are exposed\n",
     "    for inspection without affecting training.\n",
     "    \"\"\"\n",
     "    # batch,seq_len --> batch,seq_len,embed_size\n",
     "    inputs = layers.Input(shape=(maxlen,), dtype=tf.int32)\n",
     "    embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)\n",
     "    x = embedding_layer(inputs)\n",
     "\n",
     "    # batch,seq_len,embed_size --> batch,seq_len,vocab_size\n",
     "    transformer = TransformerBlock(embed_dim, num_heads, ff_dim)\n",
     "    x = transformer(x)\n",
     "    outputs = layers.Dense(vocab_size)(x)\n",
     "    model = keras.Model(inputs=inputs, outputs=[outputs, x])\n",
     "\n",
     "    # Labels are integer token ids; outputs are unnormalized logits.\n",
     "    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n",
     "\n",
     "    model.compile(\"adam\", loss=[loss_fn, None])\n",
     "    return model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 100,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T12:57:20.129878Z",
     "start_time": "2020-06-10T12:57:20.124831Z"
    }
   },
   "outputs": [],
   "source": [
     "# Training-data locations (IMDB movie-review dataset).\n",
     "batch_size = 32\n",
     "filenames = []\n",
     "directories = [\n",
     "    \"aclImdb/train/pos\",\n",
     "    \"aclImdb/train/neg\",\n",
     "    \"aclImdb/test/pos\",\n",
     "    \"aclImdb/test/neg\",\n",
     "]\n",
     "base_dir = \"../datasets\"  # root directory containing aclImdb/"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T12:57:20.518761Z",
     "start_time": "2020-06-10T12:57:20.461010Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "50000 files\n"
     ]
    }
   ],
   "source": [
    "for dir in directories:\n",
    "    dir = os.path.join(base_dir, dir)\n",
    "    for f in os.listdir(dir):\n",
    "        filenames.append(os.path.join(dir, f))\n",
    "\n",
    "print(f\"{len(filenames)} files\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T12:57:22.015213Z",
     "start_time": "2020-06-10T12:57:21.965879Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[\"This is halfway to being a top movie. The opening section, which spoofs Hollywood \\\"social message\\\" films is absolutely brilliant. It is a riot from start to finish.<br /><br />The second section, which introduces us to the main characters of the story is really great too. We get a lot of great comic setups, top notch performances, and the dialog is really dynamic.<br /><br />(Spoiler warning!)<br /><br />The one think that really annoyed me about this film though is the ending, which I think contradicts everything that went before. My interpretation was that this film was taking the mickey out all the silly prejudices and innuendo of small town gossip and national tabloid sensationalism. I loved that the film was championing the cause that a person\\'s sexuality is NOT determined by their hobbies, idiosyncrasies, fashion sense or whatever. And then the ending goes and re-enforces all the gossip and stereotypes that the movie successfully lampooned in the first place. It turns out everyone was 100% right!!! (godamit!) This was very disappointing to what was actually a great story.\" \"This movie was good for it\\'s time. If you like Eddie Murpy this is a must have to add to your collection. Eddie was young and funny with his 80\\'s haircut. Charlotte Lewis, Eddie\\'s costar is hot. This was one of her first movies and she was not bad. The graphics were good for the 80\\'s. A lot of the actors went on to do other good movies you should check them out through IMDb. Other must have from Eddie is \\\"Coming to America\\\" and \\\"48 hours\\\". Another actor \\\"Victor Wong\\\" has a small part in this movie. Check out some of his older movies like \\\"Big trouble in little china\\\". 
If you liked the action movies from the 80\\'s this is your movie.\" \"About two hundred members of a Cleveland, Ohio USA film society, named Cinematheque, gathered on August 19, 2000 to view a pristine Cinemascope print of Michelangelo Antonioni\\'s 1970 film, \\\"Zabriskie Point.\\\" Cinematheque Director John Ewing, who does a superlative job of obtaining the finest prints for his series, shared with the audience beforehand that this print was specially flown over from Italy for this one showing only.<br /><br />The audience was held spellbound as the film unfolded its artisty on the huge panoramic screen. Watching this superb print, shown the way Antonioni intended, made one aware that this is indeed a modern art work. It was all the more fitting that the series is housed in the Cleveland Insititue of Art in University Circle. <br /><br />Antonioni\\'s compositions are created for the Cinemascope landscape. His beautiful balancing of images, striking use of colors, sweeping choreographic movements, all are the work of a genuine artist, using the screen as his canvas. <br /><br />At last the audience could understand \\\"Zabriskie Point.\\\" As its narrative unfolded, it became obvious that this work is not about story per se, but rather an artist\\'s impressionistic rendering of fleeting images of his subject. The setting of some of the more turbulent activities of the sixties provides only a dramatic motor for the artist\\'s sweeping collage. <br /><br />Antonioni is not bound by conventional narrative standards, and can pause at any point to creatively embroider an event with grandiose embellishments. The audience willingly went with the flow of his remarkable imagination, as his huge images on the massive canvas held one in rapt attention. While the audience may have been only tangentially involved in character relationships, it realized the theme here is human aleination, the director\\'s recurring theme. 
<br /><br />It was also realized that no print any smaller or of lesser quality than this original one in Cinemascope can do justice to this particular rendering. The audience was therefore all the more appreciative of viewing \\\"Zabriskie Point\\\" in its original, breathtaking format, and broke into thunderous applause at the end.\" ... \"I\\'ve noticed that a lot of people are taking Opera to task for the way Betty reacts to the murders. I think they are basing these complaints on how they imagine a \\\"normal\\\" person would react. The thing is...Betty is not a \\\"normal\\\" person, due to traumatic events in her childhood. She has problems way way before the movie ever even starts...and by the end of Opera...in my opinion...she has become totally unhinged.<br /><br />---------------------SPOILERS--------------------------------------- You have to keep in mind that when she was a very small child she witnessed her mother\\'s lover commit at least one brutal murder while her sadomasochist mother was getting off watching it.<br /><br />She was raised by a woman who achieves sexual release tied up watching girls get hacked, slashed, and strangled to death. That does not make for a healthy home life. I think it\\'s pretty easy to conclude that her mother would have employed all sorts of emotional manipulation and negative reinforcement to ensure that her daughter never snitched on her. It is also likely that at her impressionable age, Betty might have been deeply confused by what she saw. Is this just something that adults do, etc.<br /><br />Betty obviously looks up to her mother...I mean...she\\'s become an opera singer just like her. If mommy likes it it can\\'t be bad, can it...mommy can\\'t be bad, can she? She couldn\\'t tell the police on her mommy or this mysterious hooded fellow she associates with mommy.<br /><br />Betty has a lot of deep-seated emotional issues. 
Her mind has for years been trying to block out the memory of what she saw her mother doing...but it keeps coming to the surface, manifesting itself in the form of horrible nightmares, skull-throbbing migraines, a dependence on relaxation techniques, and sexual frigidity She associates brutal violence/bloody death with sex on a subconscious level. There\\'s an inner struggle between the part of Betty that has confused murder/sex and the part of her which believes these things to be wrong.<br /><br />After she\\'s seen her boyfriend murdered by the hooded man...she calls the police, yet is unwilling to give her name. The part of her that thinks murder is wrong forces her to make the call, but the part that is ambivalent won\\'t allow her to admit personal involvement. The ambivalent part of her takes control before she can go all the way. So she walks away from the phone in the rain...and when she\\'s picked up by the director she\\'s acting surprisingly calm, not as upset as you would think a \\\"normal\\\" person would be...because the part of her that\\'s been blocking stuff since she was a child is trying its damnedest to block the horror of what she\\'s just witnessed.<br /><br />The state of affairs in her life all contribute to an impasse within Betty\\'s psyche. Her singing career is starting to bear fruit...she\\'s going to be a great opera singer like her mother was. But is she going to become like her mother in all ways? In the darker ways? Or will she be able to make her own path? Add this to the re-emergence of the hooded man murdering everyone around her.<br /><br />It\\'s not until the hooded man kills Daria Nicolodi\\'s character that Betty really takes an active role in defeating the killer. Here\\'s someone who loves Betty, who\\'s supported her wholeheartedly in her emerging career, who is in fact a maternal figure in Betty\\'s life now since mommy\\'s dead. 
Imagine how terrible it would be to lose your real mother and then to see the woman who is the closest thing you have to a mother get shot through the eyeball.<br /><br />I could go on...but I won\\'t. The main gist of what I\\'m saying is that the character of Betty is a lot more complex than most of the reviewers on here have been willing to acknowledge.<br /><br />Opera is one of Argento\\'s best...and not just for the visuals alone (although they are truly magnificent) and not just for the inventive murders (although they are). There is a depth here...and attention needs to be paid.\" \"This movie is well made, it is beautiful and wise. It is heart-warming. It is great. And again it shows how great Peter Falk is... he is fantastic and he even gets better, the older he gets! Thank you, Peter Falk! Thank you very much for this gem of a movie! <br /><br />This movie entertains. There is lot of wisdom in this movie. There is lot of humor in this movie. There is life in this movie... and meaning. This movie shows, how life can be.<br /><br />Peter Falk is in that movie. He is just great! Where is the Oscar for Peter Falk? He deserves it so very much.<br /><br />Peter Falk just turned 80. I do sincerely hope that there will be more movies!<br /><br />Walter J. Langbein\" \"The most irritating thing about \\\"Dies d\\'agost\\\" (August Days) is not simply that NOTHING HAPPENS in this film but that director Marc Recha has the nerve to pretend that this film is some sort of homage to leftist Catalan journalist Ramon Barnils. Unless mentioning Barnils\\' name a few times constitutes an \\\"homage,\\\" this pretense is an utter fraud. You will learn virtually nothing about Barnils in this film nor about the Spanish Civil War (1936-1939) nor about the special role of Catalunya in that war. 
You also will not learn about the collective punishment inflicted on the heroic Catalan people for years afterward by the victorious and vindictive Franco.<br /><br />The footage of the Catalan countryside is very beautiful, of course, but \\\"Dies d\\'agost\\\" does not have an extensive and varied enough collection of such scenes to qualify as a travelogue. The large number of stills shown -- not very illuminating images of the forest floor, for example -- is the clearest indication of the paucity of ideas here. The aimless drift of brothers Marc and David during their camping trip does not produce compelling cinema. On the contrary, one\\'s strongest impression is of a film made by and for spaced-out, middle-aged hippies. Don\\'t waste your time. Read a good book about the Spanish Civil War instead. (I recommend Felix Morrow\\'s scathingly anti-Stalinist \\\"Revolution and Counter-Revolution in Spain,\\\" which contains a gripping account of the 1937 Barcelona Uprising.)<br /><br />Barry Freed\"]\n"
     ]
    }
   ],
   "source": [
     "# Build the tf.data input pipeline over the raw review files.\n",
     "random.shuffle(filenames)\n",
     "text_ds = tf.data.TextLineDataset(filenames)\n",
     "text_ds = text_ds.shuffle(buffer_size=256)\n",
     "text_ds = text_ds.batch(batch_size)\n",
     "\n",
     "# Peek at one raw (untokenized) batch.\n",
     "for data in text_ds.take(1):\n",
     "    tf.print(data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T12:57:24.225987Z",
     "start_time": "2020-06-10T12:57:24.221044Z"
    }
   },
   "outputs": [],
   "source": [
     "# Text preprocessing used by TextVectorization.\n",
     "def custom_standardization(input_string):\n",
     "    \"\"\"Lowercase, strip HTML <br /> tags, and prefix each punctuation\n",
     "    character with a space so it is tokenized as its own token.\"\"\"\n",
     "\n",
     "    lowercased = tf.strings.lower(input_string)\n",
     "    stripped_html = tf.strings.regex_replace(lowercased, \"<br />\", \" \")\n",
     "    return tf.strings.regex_replace(stripped_html, f\"([{string.punctuation}])\", r\" \\1\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 104,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T12:57:31.214903Z",
     "start_time": "2020-06-10T12:57:25.032664Z"
    }
   },
   "outputs": [],
   "source": [
     "# Vectorize the text data.\n",
     "vectorize_layer = TextVectorization(\n",
     "    standardize=custom_standardization,\n",
     "    max_tokens=vocab_size - 1,\n",
     "    output_mode=\"int\",\n",
     "    output_sequence_length=maxlen + 1,  # +1 so input/label can be shifted\n",
     ")\n",
     "vectorize_layer.adapt(text_ds)\n",
     "\n",
     "\n",
     "# Vocabulary; list elements are bytes objects.\n",
     "vocab = vectorize_layer.get_vocabulary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T13:01:37.294149Z",
     "start_time": "2020-06-10T13:01:37.153704Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tf.Tensor(\n",
      "[[  13   16    5 ...    0    0    0]\n",
      " [   1  592  165 ...  912    3  278]\n",
      " [  14   16   34 ...  291    8 1539]\n",
      " ...\n",
      " [   1   65    1 ...   54  134   30]\n",
      " [  13    9    5 ...  498 3874    8]\n",
      " [   2  347  466 ...    0    0    0]], shape=(32, 101), dtype=int64)\n",
      "[b\"'s\", b'movie', b'of', b'up', b'cannot', b'same', b'by', b'phil', b'to', b'sometimes', b'historic', b'a', b',', b'things', b'in', b'bold', b'is', b'possibly', b'years', b'gives', b'of', b'mike', b'everyone', b'a', b',', b'-the', b'his', b'version', b'to', b'about', b'of', b'minutes', b'statements', b'a', b',', b'michael', b'lives', b'in', b'awful', b'glory', b'either', b'but', b'into', b'yourself', b'a', b',', b'see', b'in', b'surgeon', b'to', b'screen', b'them', b'phil', b'to', b'bass', b'when', b'ff', b'appears', b'a', b'i', b'in', b'conclusion', b'film', b'made', b'move', b'wouldn', b'to', b'.', b'is', b'of', b'forward', b'tale', b'knowledge', b'a', b'that', b'movie', b'up', b'summer', b'a', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the', b'the']\n"
     ]
    }
   ],
   "source": [
     "# Inspect one tokenized batch and decode the first example back to tokens.\n",
     "for text in text_ds.take(1):\n",
     "    text = tf.expand_dims(text, -1)\n",
     "    tokenized_sentences = vectorize_layer(text)\n",
     "    print(tokenized_sentences)\n",
     "    print([vocab[idx] for idx in tokenized_sentences[0]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T12:05:04.135105Z",
     "start_time": "2020-06-10T12:05:04.046605Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(<tf.Tensor: shape=(32, 100), dtype=int64, numpy=\n",
      "array([[  11,    2, 1182, ...,    0,    0,    0],\n",
      "       [  30,   15,    1, ..., 7800, 7764,   80],\n",
      "       [  13,    9,    5, ...,  302,    2,  639],\n",
      "       ...,\n",
      "       [  12,  583,   73, ..., 1404,   11,    2],\n",
      "       [  13,   52,  199, ...,   76,    5,  436],\n",
      "       [  61, 1361,   22, ...,   14,    1,   52]])>, <tf.Tensor: shape=(32, 100), dtype=int64, numpy=\n",
      "array([[   2, 1182,    7, ...,    0,    0,    0],\n",
      "       [  15,    1,    4, ..., 7764,   80, 2533],\n",
      "       [   9,    5, 2152, ...,    2,  639,   29],\n",
      "       ...,\n",
      "       [ 583,   73,  149, ...,   11,    2,   71],\n",
      "       [  52,  199,    8, ...,    5,  436,    4],\n",
      "       [1361,   22,    4, ...,    1,   52,   84]])>)\n"
     ]
    }
   ],
   "source": [
    "# Convert raw training text into (input, label) pairs for language modeling\n",
    "def prepare_lm_inputs_labels(text):\n",
    "    \"\"\"Tokenize a batch of strings and build next-token prediction pairs.\n",
    "\n",
    "    Returns (x, y) where y is x shifted one position to the left.\n",
    "    \"\"\"\n",
    "    text = tf.expand_dims(text, -1)\n",
    "    tokenized_sentences = vectorize_layer(text)\n",
    "    x = tokenized_sentences[:, :-1] \n",
    "    y = tokenized_sentences[:, 1:] # labels are the inputs shifted one step ahead\n",
    "    return x, y\n",
    "\n",
    "\n",
    "text_ds = text_ds.map(prepare_lm_inputs_labels)\n",
    "text_ds = text_ds.prefetch(tf.data.experimental.AUTOTUNE)\n",
    "\n",
    "# Peek at one batch to verify the (input, label) structure\n",
    "for data in text_ds.take(1):\n",
    "    print(data)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 文本生成\n",
    "以回调函数的形式实现"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T12:05:04.169359Z",
     "start_time": "2020-06-10T12:05:04.162802Z"
    }
   },
   "outputs": [],
   "source": [
    "# Text-generation callback, run at the end of each training epoch\n",
    "class TextGenerator(keras.callbacks.Callback):\n",
    "    \"\"\"Generate text from the trained model using top-k sampling.\n",
    "\n",
    "    Feeds ``start_tokens`` to the model, samples the next token from the\n",
    "    top-k logits at the last real position, appends it to the context and\n",
    "    repeats until ``max_tokens`` tokens have been generated.\n",
    "\n",
    "    Relies on the notebook-global ``maxlen`` for the model's input length.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "                 max_tokens,\n",
    "                 start_tokens,\n",
    "                 index_to_word,\n",
    "                 top_k=10,\n",
    "                 print_every=1):\n",
    "        # Fix: initialize the Keras Callback base class\n",
    "        super(TextGenerator, self).__init__()\n",
    "        self.max_tokens = max_tokens        # number of tokens to generate\n",
    "        self.start_tokens = start_tokens    # prompt, as a list of token ids\n",
    "        self.index_to_word = index_to_word  # vocabulary list: id -> token (bytes)\n",
    "        self.print_every = print_every      # only generate every N epochs\n",
    "        self.k = top_k                      # sample among the k most likely tokens\n",
    "\n",
    "    def sample_from(self, logits):\n",
    "        \"\"\"Sample one token id from the top-k entries of ``logits``.\"\"\"\n",
    "        logits, indices = tf.math.top_k(logits, k=self.k, sorted=True)\n",
    "        indices = np.asarray(indices).astype(\"int32\")\n",
    "        preds = keras.activations.softmax(tf.expand_dims(logits, 0))[0]\n",
    "        preds = np.asarray(preds).astype(\"float32\")\n",
    "        return np.random.choice(indices, p=preds)\n",
    "\n",
    "    def detokenize(self, number):\n",
    "        \"\"\"Map a token id back to its vocabulary entry.\"\"\"\n",
    "        return self.index_to_word[number]\n",
    "\n",
    "    def on_epoch_end(self, epoch, logs=None):\n",
    "        start_tokens = [_ for _ in self.start_tokens]  # copy; keep the prompt intact\n",
    "        if (epoch + 1) % self.print_every != 0:\n",
    "            return\n",
    "        num_tokens_generated = 0\n",
    "        tokens_generated = []\n",
    "        while num_tokens_generated <= self.max_tokens:\n",
    "            # Pad (or truncate) the running context to exactly ``maxlen`` tokens,\n",
    "            # and remember the last real position to sample the next token from.\n",
    "            pad_len = maxlen - len(start_tokens)\n",
    "            sample_index = len(start_tokens) - 1\n",
    "            if pad_len < 0:\n",
    "                x = start_tokens[:maxlen]\n",
    "                sample_index = maxlen - 1\n",
    "            elif pad_len > 0:\n",
    "                x = start_tokens + [0] * pad_len\n",
    "            else:\n",
    "                x = start_tokens\n",
    "            x = np.array([x])\n",
    "            y, _ = self.model.predict(x)\n",
    "            sample_token = self.sample_from(y[0][sample_index])\n",
    "            tokens_generated.append(sample_token)\n",
    "            start_tokens.append(sample_token)\n",
    "            num_tokens_generated = len(tokens_generated)\n",
    "\n",
    "        # Vocabulary entries are bytes, so decode each before joining\n",
    "        txt = \" \".join([\n",
    "            bytes.decode(self.detokenize(_))\n",
    "            for _ in self.start_tokens + tokens_generated\n",
    "        ])\n",
    "        print(f\"generated text:\\n{txt}\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T12:05:04.201332Z",
     "start_time": "2020-06-10T12:05:04.197412Z"
    }
   },
   "outputs": [],
   "source": [
    "# Build the reverse lookup: vocabulary token -> integer id\n",
    "word_to_index = {word: index for index, word in enumerate(vocab)}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T12:05:04.232571Z",
     "start_time": "2020-06-10T12:05:04.230516Z"
    }
   },
   "outputs": [],
   "source": [
    "start_prompt = \"this movie is\"\n",
    "\n",
    "# Words must be encoded from str to bytes before looking them up in the\n",
    "# vocabulary; unknown words fall back to id 1 (presumably the OOV token\n",
    "# of vectorize_layer -- confirm)\n",
    "start_tokens = [word_to_index.get(str.encode(_), 1) for _ in start_prompt.split()]\n",
    "num_tokens_generated = 40\n",
    "text_gen_callback = TextGenerator(num_tokens_generated, start_tokens, vocab)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-06-10T12:47:46.470467Z",
     "start_time": "2020-06-10T12:05:04.261118Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/30\n",
      "generated text:\n",
      "this movie is , . , . are to , for movie into a i as because because of star a , for as , gets to , for me could this of going is after were his of into see to up more\n",
      "\n",
      "1575/1575 - 89s - loss: 5.4447 - dense_81_loss: 5.4447\n",
      "Epoch 2/30\n",
      "generated text:\n",
      "this movie is , you intended a , how beautiful is they , for movie of more you and to that something role me , . . . are a i in of into for out , you and to , . are idea\n",
      "\n",
      "1575/1575 - 87s - loss: 4.7760 - dense_81_loss: 4.7760\n",
      "Epoch 3/30\n",
      "generated text:\n",
      "this movie is be ever this , us to be . a be two his out of take take worst so ? get , . . on it , help a i in of both place john is or ever a 's in of\n",
      "\n",
      "1575/1575 - 84s - loss: 4.5598 - dense_81_loss: 4.5598\n",
      "Epoch 4/30\n",
      "generated text:\n",
      "this movie is , kind you and that movie up rating she i movie ok this of you a this enough and i movie of both tired a , see movie up than front a that watch , how ; none that effects i\n",
      "\n",
      "1575/1575 - 73s - loss: 4.4304 - dense_81_loss: 4.4304\n",
      "Epoch 5/30\n",
      "generated text:\n",
      "this movie is , simply a , you movie , you was too features way of into for a that movie ideas should there 's a i movie of up more for a that watch was as cinematography . and that however i movie\n",
      "\n",
      "1575/1575 - 88s - loss: 4.3321 - dense_81_loss: 4.3321\n",
      "Epoch 6/30\n",
      "generated text:\n",
      "this movie is , don try to dialog did like , how you a , acting and to , character his he adds and ) her his can fantastic this , you in of minutes is . and ) i as about with of\n",
      "\n",
      "1575/1575 - 85s - loss: 4.2526 - dense_81_loss: 4.2526\n",
      "Epoch 7/30\n",
      "generated text:\n",
      "this movie is , for storyline to that one time american good that too one get of more american a , you movie up before and - that funny not watch and that too he watch i movie before and ) was didn not\n",
      "\n",
      "1575/1575 - 83s - loss: 4.1834 - dense_81_loss: 4.1834\n",
      "Epoch 8/30\n",
      "generated text:\n",
      "this movie is , you to , for movie , where and , . are you was that comedy and that however i my ! , for a what 's you movie of minutes very and to , acting lives there , you .\n",
      "\n",
      "1575/1575 - 82s - loss: 4.1216 - dense_81_loss: 4.1216\n",
      "Epoch 9/30\n",
      "generated text:\n",
      "this movie is , you suggest though as where a i me of into see and ) 's for movie about with do with of you a that when itself she i movie ok with what i top if and that one being i\n",
      "\n",
      "1575/1575 - 88s - loss: 4.0665 - dense_81_loss: 4.0665\n",
      "Epoch 10/30\n",
      "generated text:\n",
      "this movie is of into you was that one being this from frightening but , into things to deep with , becomes says this 's you a that last of both john is of give really performance i top no but from garbage see\n",
      "\n",
      "1575/1575 - 84s - loss: 4.0165 - dense_81_loss: 4.0165\n",
      "Epoch 11/30\n",
      "generated text:\n",
      "this movie is , you was movie , where is be two to . well may into to , where is be into close and i as doing of into you a 's too their ! all is , where for does could at\n",
      "\n",
      "1575/1575 - 90s - loss: 3.9698 - dense_81_loss: 3.9698\n",
      "Epoch 12/30\n",
      "generated text:\n",
      "this movie is , . is , . and . . are and i as because , . as . as . a this enough and , for person in of minutes looking a 's for ? get could this of into thought for\n",
      "\n",
      "1575/1575 - 84s - loss: 3.9273 - dense_81_loss: 3.9273\n",
      "Epoch 13/30\n",
      "generated text:\n",
      "this movie is , for a , for directed but . and to in he before by they to , movies an one it people i no but of new quickly this had piece some , best in , . and i money into\n",
      "\n",
      "1575/1575 - 87s - loss: 3.8874 - dense_81_loss: 3.8874\n",
      "Epoch 14/30\n",
      "generated text:\n",
      "this movie is good , gets you and movie , acting and , did in , where you was in . who all is , where two that one being a , you me i a , actor beginning in was i movie of\n",
      "\n",
      "1575/1575 - 87s - loss: 3.8510 - dense_81_loss: 3.8510\n",
      "Epoch 15/30\n",
      "generated text:\n",
      "this movie is , for storyline movie , . to me we live a a , actor beginning in was , for in he of into for and ) that last of going is great to , see his up hit a that watch\n",
      "\n",
      "1575/1575 - 83s - loss: 3.8162 - dense_81_loss: 3.8162\n",
      "Epoch 16/30\n",
      "generated text:\n",
      "this movie is of . and of you but of new is , two was that one being like , gets . to , . are is , you and , actor . is , . as where entertaining and effort and to ,\n",
      "\n",
      "1575/1575 - 82s - loss: 3.7841 - dense_81_loss: 3.7841\n",
      "Epoch 17/30\n",
      "generated text:\n",
      "this movie is , . and so could or . a a modern . in of minutes any hour to sweater and guests and guests a . and 's you through one interest other of . and that movie someone joke it , until\n",
      "\n",
      "1575/1575 - 84s - loss: 3.7535 - dense_81_loss: 3.7535\n",
      "Epoch 18/30\n",
      "generated text:\n",
      "this movie is of for but of up before to point and up been to point a i in , . a that watch 's for in of into you and but , for in of into for a that its not watch ,\n",
      "\n",
      "1575/1575 - 89s - loss: 3.7261 - dense_81_loss: 3.7261\n",
      "Epoch 19/30\n",
      "generated text:\n",
      "this movie is of . . a , for as did and acting and , actor films in all is , don loved after is , (my and ) 's you ? of see was in about of over hour film we a that\n",
      "\n",
      "1575/1575 - 85s - loss: 3.6999 - dense_81_loss: 3.6999\n",
      "Epoch 20/30\n",
      "generated text:\n",
      "this movie is of you expensive naschy ) , you movie down more a i movie of up more now at i in he with more with , you in a of minutes is , for as bill but i as . and ,\n",
      "\n",
      "1575/1575 - 86s - loss: 3.6749 - dense_81_loss: 3.6749\n",
      "Epoch 21/30\n",
      "generated text:\n",
      "this movie is 's for a that however i movie more and that my there 's movie and - do i my ! of more you a i as of up more for a i in out of rather was in - do i\n",
      "\n",
      "1575/1575 - 86s - loss: 3.6521 - dense_81_loss: 3.6521\n",
      "Epoch 22/30\n",
      "generated text:\n",
      "this movie is , how for a was me of into easy to , acting movie hit and up gets a that movie someone joke it i a that me being i to movie - rating by , scene and most , \"the for\n",
      "\n",
      "1575/1575 - 71s - loss: 3.6309 - dense_81_loss: 3.6309\n",
      "Epoch 23/30\n",
      "generated text:\n",
      "this movie is , how for ok to i movie of up more for and i movie - than bit a i in of into for was that when not only itself , played that movie - including a that watching he each what\n",
      "\n",
      "1575/1575 - 85s - loss: 3.6106 - dense_81_loss: 3.6106\n",
      "Epoch 24/30\n",
      "generated text:\n",
      "this movie is , spiritual to that show 's for a this even theatre to that movie \"good \" even fan a a a , for person in he up loved to i as of into and into acting to i as up more\n",
      "\n",
      "1575/1575 - 86s - loss: 3.5920 - dense_81_loss: 3.5920\n",
      "Epoch 25/30\n",
      "generated text:\n",
      "this movie is 's for a that one of into live and to that however i movie out of take here so as of both john and to movie - rating to that watching each , action was that my one sky if this\n",
      "\n",
      "1575/1575 - 90s - loss: 3.5741 - dense_81_loss: 3.5741\n",
      "Epoch 26/30\n",
      "generated text:\n",
      "this movie is , couldn spiritual and to that when not our was , you in he was do a , acting in - do i in a , actor reason . . and to , . his about - do was and that\n",
      "\n",
      "1575/1575 - 89s - loss: 3.5573 - dense_81_loss: 3.5573\n",
      "Epoch 27/30\n",
      "generated text:\n",
      "this movie is even theatre and even behind for and to that watching win was that know a a a a a force movie of into for a , character know he considering is a a a british last he all is thing two\n",
      "\n",
      "1575/1575 - 89s - loss: 3.5420 - dense_81_loss: 3.5420\n",
      "Epoch 28/30\n",
      "generated text:\n",
      "this movie is 's you to me can more years this even view and , where that too which of death for there i movie a , see person in of doesn mine a that one effects 's for but of new 've and\n",
      "\n",
      "1575/1575 - 87s - loss: 3.5269 - dense_81_loss: 3.5269\n",
      "Epoch 29/30\n",
      "generated text:\n",
      "this movie is 's for suggest a , doubt is , for -as to i in of up loved blood a a a a a plain i as , actor films movie of doesn and time doesn . a , actor once such have\n",
      "\n",
      "1575/1575 - 86s - loss: 3.5127 - dense_81_loss: 3.5127\n",
      "Epoch 30/30\n",
      "generated text:\n",
      "this movie is , for storyline to movie , their more now movie this 's for a that watch , acting movie out 's for to that watch , years well into a , acting know , for movie , did well about of\n",
      "\n",
      "1575/1575 - 91s - loss: 3.4994 - dense_81_loss: 3.4994\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<tensorflow.python.keras.callbacks.History at 0x7fe360711c10>"
      ]
     },
     "execution_count": 92,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Train for 30 epochs; the callback prints a generated sample after each epoch\n",
    "model = create_model()\n",
    "model.fit(text_ds, verbose=2, epochs=30, callbacks=[text_gen_callback])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "jupytext": {
   "cell_metadata_filter": "-all",
   "formats": "py:light,ipynb",
   "notebook_metadata_filter": "-all"
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
