{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[b'the', b'and', b'a', b'of', b'to', b'is', b'in', b'it', b'i', b'this', b'that', b'was', b'as', b'for', b'with', b'movie', b'but', b'film', b'on', b'not', b'you', b'his', b'are', b'have', b'be', b'he', b'one', b'its', b'at', b'all', b'by', b'an', b'they', b'from', b'who', b'so', b'like', b'her', b'just', b'or', b'about', b'has', b'if', b'out', b'some', b'there', b'what', b'good', b'more', b'when', b'very', b'she', b'even', b'my', b'no', b'would', b'up', b'time', b'only', b'which', b'story', b'really', b'their', b'were', b'had', b'see', b'can', b'me', b'than', b'we', b'much', b'well', b'get', b'been', b'will', b'into', b'people', b'also', b'other', b'do', b'bad', b'because', b'great', b'first', b'how', b'him', b'most', b'dont', b'made', b'then', b'them', b'films', b'movies', b'way', b'make', b'could', b'too', b'any', b'after', b'characters']\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import re,string\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from matplotlib import pyplot as plt\n",
    "import tensorflow as tf\n",
    "from tensorflow.keras import models,layers,preprocessing,optimizers,losses,metrics\n",
    "from tensorflow.keras.layers.experimental.preprocessing import TextVectorization\n",
    "\n",
    "# Each line of the csv files is '<label><TAB><review text>'.\n",
    "train_data_path = os.path.join(r'E:\\nlp_data\\imdb','train.csv')\n",
    "test_data_path = os.path.join(r'E:\\nlp_data\\imdb','test.csv')\n",
    "\n",
    "MAX_WORDS = 10000  # vocabulary size for TextVectorization\n",
    "MAX_LEN = 200      # pad/truncate every review to this many tokens\n",
    "BATCH_SIZE = 20\n",
    "\n",
    "def split_line(line):\n",
    "    \"\"\"Split one tab-separated line into a (text, label) pair, each of shape (1,).\"\"\"\n",
    "    arr = tf.strings.split(line,'\\t')\n",
    "    label = tf.expand_dims(tf.cast(tf.strings.to_number(arr[0]),tf.int32),axis=0)\n",
    "    text = tf.expand_dims(arr[1],axis=0)\n",
    "    return (text,label)\n",
    "\n",
    "ds_train_raw = tf.data.TextLineDataset(filenames=[train_data_path]) \\\n",
    "    .map(split_line,num_parallel_calls=tf.data.experimental.AUTOTUNE) \\\n",
    "    .shuffle(buffer_size = 1000).batch(BATCH_SIZE) \\\n",
    "    .prefetch(tf.data.experimental.AUTOTUNE)\n",
    "\n",
    "ds_test_raw = tf.data.TextLineDataset(filenames=[test_data_path]) \\\n",
    "    .map(split_line,num_parallel_calls=tf.data.experimental.AUTOTUNE) \\\n",
    "    .batch(BATCH_SIZE) \\\n",
    "    .prefetch(tf.data.experimental.AUTOTUNE)\n",
    "\n",
    "def clean_text(text):\n",
    "    \"\"\"Standardization hook for TextVectorization: lowercase, strip <br /> tags, remove punctuation.\"\"\"\n",
    "    lowercased = tf.strings.lower(text)\n",
    "    stripped_html = tf.strings.regex_replace(lowercased,'<br />',\" \")\n",
    "    cleaned_punctuation = tf.strings.regex_replace(stripped_html,\n",
    "                                                   '[%s]' % re.escape(string.punctuation),\"\")\n",
    "    return cleaned_punctuation\n",
    "\n",
    "vectorize_layer = TextVectorization(\n",
    "    standardize=clean_text,\n",
    "    split='whitespace',\n",
    "    max_tokens=MAX_WORDS - 1,  # NOTE(review): presumably leaves a slot for padding/OOV -- confirm\n",
    "    output_mode='int',\n",
    "    output_sequence_length=MAX_LEN\n",
    ")\n",
    "\n",
    "# Build the vocabulary from the training texts only (labels dropped).\n",
    "ds_adapt_text = ds_train_raw.map(lambda text,label:text)\n",
    "vectorize_layer.adapt(ds_adapt_text)\n",
    "print(vectorize_layer.get_vocabulary()[0:100])\n",
    "\n",
    "ds_train = ds_train_raw.map(lambda text,label:(vectorize_layer(text),label)) \\\n",
    "    .prefetch(tf.data.experimental.AUTOTUNE)\n",
    "# NOTE: despite its name, ds_text is the vectorized TEST set; the later\n",
    "# training/evaluation/prediction cells all refer to it by this name.\n",
    "ds_text = ds_test_raw.map(lambda text,label:(vectorize_layer(text),label)) \\\n",
    "    .prefetch(tf.data.experimental.AUTOTUNE)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"cnn_model\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "embedding (Embedding)        multiple                  70000     \n",
      "_________________________________________________________________\n",
      "conv_1 (Conv1D)              multiple                  576       \n",
      "_________________________________________________________________\n",
      "max_pooling1d (MaxPooling1D) multiple                  0         \n",
      "_________________________________________________________________\n",
      "conv_2 (Conv1D)              multiple                  4224      \n",
      "_________________________________________________________________\n",
      "flatten (Flatten)            multiple                  0         \n",
      "_________________________________________________________________\n",
      "dense (Dense)                multiple                  6145      \n",
      "=================================================================\n",
      "Total params: 80,945\n",
      "Trainable params: 80,945\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "tf.keras.backend.clear_session()\n",
    "\n",
    "class CnnModel(models.Model):\n",
    "    \"\"\"1-D CNN text classifier: Embedding -> Conv1D -> MaxPool -> Conv1D -> MaxPool -> Flatten -> Dense(sigmoid).\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(CnnModel,self).__init__()\n",
    "\n",
    "    # Layers are created here (rather than in __init__) so they are only\n",
    "    # instantiated when the input shape is known via model.build() below.\n",
    "    def build(self,input_shape):\n",
    "        self.embedding = layers.Embedding(MAX_WORDS,7,input_length=MAX_LEN)\n",
    "        self.conv_1 = layers.Conv1D(16,kernel_size=5,name='conv_1',activation='relu')\n",
    "        # Single pooling layer instance, reused after both convolutions in call().\n",
    "        self.pool = layers.MaxPool1D()\n",
    "        self.conv_2 = layers.Conv1D(128,kernel_size=2,name='conv_2',activation='relu')\n",
    "        self.flatten = layers.Flatten()\n",
    "        self.dense = layers.Dense(1,activation='sigmoid')\n",
    "        super(CnnModel,self).build(input_shape)\n",
    "\n",
    "    def call(self,x):\n",
    "        \"\"\"Forward pass; x is a batch of integer token-id sequences.\"\"\"\n",
    "        x = self.embedding(x)\n",
    "        x = self.conv_1(x)\n",
    "        x = self.pool(x)\n",
    "        x = self.conv_2(x)\n",
    "        x = self.pool(x)\n",
    "        x = self.flatten(x)\n",
    "        x = self.dense(x)\n",
    "        return x\n",
    "\n",
    "model = CnnModel()\n",
    "model.build(input_shape=(None,MAX_LEN))\n",
    "model.summary()\n",
    "\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
    "@tf.function\n",
    "def printbar():\n",
    "    \"\"\"Print a separator bar followed by the current wall-clock time as HH:MM:SS.\"\"\"\n",
    "    ts  = tf.timestamp()\n",
    "    today_ts = ts % (24*60*60)\n",
    "\n",
    "    # +8 shifts the UTC timestamp -- presumably to UTC+8 local time; confirm.\n",
    "    hour = tf.cast(today_ts//3600 + 8,tf.int32) % tf.constant(24)\n",
    "    minite = tf.cast((today_ts % 3600) // 60,tf.int32)\n",
    "    second = tf.cast(tf.floor(today_ts % 60),tf.int32)\n",
    "\n",
    "    def timeformat(m):\n",
    "        # Left-pad single-digit values with '0' so every field is two characters.\n",
    "        if tf.strings.length(tf.strings.format(\"{}\",m)) == 1:\n",
    "            return (tf.strings.format(\"0{}\",m))\n",
    "        else:\n",
    "            return (tf.strings.format(\"{}\",m))\n",
    "\n",
    "    timestring = tf.strings.join([\n",
    "        timeformat(hour),timeformat(minite),\n",
    "        timeformat(second)\n",
    "    ],separator=\":\")\n",
    "    tf.print(\"============\" * 8,end = \"\")\n",
    "    tf.print(timestring)\n",
    "\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================================================================================================23:19:12\r\n",
      "Epoch=1,Loss:0.441780865,Accuracy:0.7703,Valid Loss:0.329592139,Valid Accuracy:0.8614\r\n",
      "\r\n",
      "================================================================================================23:19:18\r\n",
      "Epoch=2,Loss:0.249349,Accuracy:0.9035,Valid Loss:0.341415077,Valid Accuracy:0.8632\r\n",
      "\r\n",
      "================================================================================================23:19:25\r\n",
      "Epoch=3,Loss:0.181223,Accuracy:0.932,Valid Loss:0.365655631,Valid Accuracy:0.8678\r\n",
      "\r\n",
      "================================================================================================23:19:31\r\n",
      "Epoch=4,Loss:0.125265956,Accuracy:0.95585,Valid Loss:0.44199,Valid Accuracy:0.8634\r\n",
      "\r\n",
      "================================================================================================23:19:37\r\n",
      "Epoch=5,Loss:0.0790437385,Accuracy:0.973,Valid Loss:0.618559778,Valid Accuracy:0.8534\r\n",
      "\r\n",
      "================================================================================================23:19:43\r\n",
      "Epoch=6,Loss:0.0471161604,Accuracy:0.98405,Valid Loss:0.741176367,Valid Accuracy:0.8552\r\n",
      "\r\n"
     ]
    }
   ],
   "source": [
    "optimizer = optimizers.Nadam()\n",
    "loss_func = losses.BinaryCrossentropy()\n",
    "\n",
    "# Running metrics, accumulated per epoch and reset at the end of each epoch.\n",
    "train_loss = metrics.Mean(name='train_loss')\n",
    "train_metric = metrics.BinaryAccuracy(name='train_accuracy')\n",
    "\n",
    "valid_loss = metrics.Mean(name='valid_loss')\n",
    "valid_metric = metrics.BinaryAccuracy(name='valid_accuracy')\n",
    "\n",
    "@tf.function\n",
    "def train_step(model,features,labels):\n",
    "    \"\"\"One optimization step: forward pass, loss, backward pass, metric update.\"\"\"\n",
    "    with tf.GradientTape() as tape:\n",
    "        predictions = model(features,training = True)\n",
    "        loss = loss_func(labels,predictions)\n",
    "\n",
    "    gradients = tape.gradient(loss,model.trainable_variables)\n",
    "    optimizer.apply_gradients(zip(gradients,model.trainable_variables))\n",
    "\n",
    "    train_loss.update_state(loss)\n",
    "    train_metric.update_state(labels,predictions)\n",
    "\n",
    "@tf.function\n",
    "def valid_step(model,features,labels):\n",
    "    \"\"\"Evaluate one batch without gradient updates; accumulates the valid metrics.\"\"\"\n",
    "    predictions = model(features,training=False)\n",
    "    batch_loss = loss_func(labels,predictions)\n",
    "    valid_loss.update_state(batch_loss)\n",
    "    valid_metric.update_state(labels,predictions)\n",
    "\n",
    "def train_model(model,ds_train,ds_valid,epochs):\n",
    "    \"\"\"Train for `epochs` epochs, logging train/valid loss and accuracy after each.\"\"\"\n",
    "    for epoch in tf.range(1,epochs + 1):\n",
    "        for features,labels in ds_train:\n",
    "            train_step(model,features,labels)\n",
    "\n",
    "        for features,labels in ds_valid:\n",
    "            valid_step(model,features,labels)\n",
    "\n",
    "        # Adjust this logs template to match the metrics actually tracked.\n",
    "        logs = 'Epoch={},Loss:{},Accuracy:{},Valid Loss:{},Valid Accuracy:{}'\n",
    "\n",
    "        # `% 1` logs every epoch; raise the modulus to log less frequently.\n",
    "        if epoch % 1 == 0:\n",
    "            printbar()\n",
    "            tf.print(tf.strings.format(logs,\n",
    "            (epoch,train_loss.result(),train_metric.result(),valid_loss.result(),valid_metric.result())))\n",
    "            tf.print(\"\")\n",
    "\n",
    "        train_loss.reset_states()\n",
    "        valid_loss.reset_states()\n",
    "        train_metric.reset_states()\n",
    "        valid_metric.reset_states()\n",
    "\n",
    "# NOTE: ds_text is the vectorized test set (named in the data-prep cell),\n",
    "# used here as the validation set.\n",
    "train_model(model,ds_train,ds_text,epochs = 6)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "outputs": [],
   "source": [
    "def evaluate_model(model,ds_valid):\n",
    "    \"\"\"Run valid_step over every batch of ds_valid, print the aggregate\n",
    "    validation loss/accuracy, then reset the validation metrics.\"\"\"\n",
    "    for features, labels in ds_valid:\n",
    "        valid_step(model,features,labels)\n",
    "    logs = 'Valid Loss:{},Valid Accuracy:{}'\n",
    "    tf.print(tf.strings.format(logs,(valid_loss.result(),valid_metric.result())))\n",
    "\n",
    "    # Reset only the validation metrics this function touched; the previous\n",
    "    # version also reset train_metric here, which would clobber in-progress\n",
    "    # training accuracy if evaluation were run between epochs.\n",
    "    valid_loss.reset_states()\n",
    "    valid_metric.reset_states()"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Valid Loss:0.741176367,Valid Accuracy:0.8552\r\n"
     ]
    }
   ],
   "source": [
    "# Evaluate on ds_text, the vectorized test set built in the data-prep cell.\n",
    "evaluate_model(model,ds_text)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [
    {
     "data": {
      "text/plain": "array([[0.77151763],\n       [0.9999442 ],\n       [0.9984062 ],\n       ...,\n       [0.9015982 ],\n       [0.7602847 ],\n       [1.        ]], dtype=float32)"
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sigmoid probabilities (positive-class scores) for the test set.\n",
    "model.predict(ds_text)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: ./model/tf_model_savemodel/0\\assets\n",
      "export saved model\n",
      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n"
     ]
    },
    {
     "data": {
      "text/plain": "array([[0.77151763],\n       [0.9999442 ],\n       [0.9984062 ],\n       ...,\n       [0.9015982 ],\n       [0.7602847 ],\n       [1.        ]], dtype=float32)"
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Export in TensorFlow SavedModel format, then reload and sanity-check that\n",
    "# the reloaded model still produces predictions on the test set.\n",
    "model.save('./model/tf_model_savemodel/0',save_format='tf')\n",
    "print(\"export saved model\")\n",
    "\n",
    "model_loaded = tf.keras.models.load_model('./model/tf_model_savemodel/0')\n",
    "model_loaded.predict(ds_text)\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "keras",
   "language": "python",
   "display_name": "keras"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}