{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/lawbda/env/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import pathlib\n",
    "import pickle\n",
    "import os\n",
    "import json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "gpu_id='0'\n",
    "def init_env():\n",
    "    os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n",
    "    os.environ['CUDA_VISIBLE_DEVICES']=gpu_id\n",
    "\n",
    "init_env()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import KFold\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.metrics import accuracy_score"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 对象准备"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class batcher:\n",
    "    def __init__(self,data_length,batch_size,seed=17,shuffle_type='strict'):\n",
    "        '''\n",
    "        seed 表示随机数种子\n",
    "        shuffle 表示是否进行随机 默认是随机的\n",
    "        shuffle_type 有几种取值 strict表示严格模式 该模式下取batch之间交集为0 \n",
    "            ease模式下每次取batch完全随机 各个batch之间可能存在相交的情况\n",
    "            no_shuffle 表示顺序进行取用\n",
    "        data_length 表示需要取batch的数据的总规模 必须字段\n",
    "        \n",
    "        '''\n",
    "        self.seed = seed\n",
    "        np.random.seed(seed)\n",
    "        self.shuffle_type = shuffle_type\n",
    "        # 初始化时就确定batch_size\n",
    "        self.data_length = data_length\n",
    "        self.batch_size = batch_size\n",
    "        # 用于进行记录的文件 默认是个dict就行\n",
    "        self.log_data = {}\n",
    "        # 标定当前取了多少数\n",
    "        self.has_fetch = 0\n",
    "        # 每次取用数据之后 所有id都加入batch_num中 当模式为strict时 取数需要不在batch_numlist中出现\n",
    "        self.batch_num = []\n",
    "        \n",
    "        \n",
    "        \n",
    "    def __save(self,obj,fname,ftype='json'):\n",
    "        if ftype == 'json':\n",
    "            with open(fname) as file:\n",
    "                json.dump(self.log_data,file,ensure_ascii=False)\n",
    "        else:\n",
    "            pass\n",
    "    def __load(self,fname,ftype='json'):\n",
    "        pass\n",
    "    def __log(self):\n",
    "        '''\n",
    "        构造log_data对象 主要是本次取batch的名称\n",
    "        batch中标号和数目信息\n",
    "        输入一个名字就行\n",
    "        '''\n",
    "        self.log_data['seed'] = self.seed\n",
    "        self.log_data['shuffle_type'] = self.shuffle_type\n",
    "        self.log_data['batch_num'] = self.batch_num\n",
    "        self.log_data['batch_size'] = self.batch_size\n",
    "    def __random(self,batch_size,ban_list):\n",
    "        '''\n",
    "        取batch_size大小数目的随机数ban_list为不取的数\n",
    "        '''\n",
    "        count = 0\n",
    "        number = []\n",
    "        while len(number) < batch_size:\n",
    "            rd = np.random.randint(0,self.data_length)\n",
    "            if not rd in number and not rd in ban_list:\n",
    "                number.append(rd)\n",
    "                count += 1\n",
    "            if count + len(ban_list) >= self.data_length:\n",
    "                break\n",
    "        return number\n",
    "    \n",
    "    def get_batch(self,feature,label):\n",
    "        '''\n",
    "        通过输入featrue和label以及batch_size进行对应数据的取用\n",
    "        '''\n",
    "        if self.has_fetch >= self.data_length:\n",
    "        # 当当前的取数超过数据规模 重新进行取数\n",
    "            self.has_fetch = 0\n",
    "            self.batch_num = []\n",
    "        volume = self.has_fetch\n",
    "        index = [i for i in range(volume,volume + self.batch_size) if i<self.data_length]\n",
    "        if self.shuffle_type == 'strict':\n",
    "            index = self.__random(batch_size=self.batch_size,ban_list=self.batch_num)\n",
    "        elif self.shuffle_type == 'ease':\n",
    "            index = self.__random(batch_size=self.batch_size,ban_list=[])\n",
    "        else:\n",
    "            pass\n",
    "        self.batch_num += index\n",
    "        self.has_fetch += self.batch_size\n",
    "        return feature[np.array(index)],label[np.array(index)]\n",
    "     \n",
    "    def write_log(self,fname):\n",
    "        self.__log()\n",
    "        self.__save(self.log_data,fname,ftype='json')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class WordsNameNumber:\n",
    "    '''\n",
    "    输入一个篇章 处理 得到词频字典\n",
    "    对字典进行排序 按照词频由大到小\n",
    "    按照排序结果给词进行编码 由 1 开始（便于在cnn中使用）\n",
    "    可以设置停用词词典 若有词典则在进行排序时就删除这些词\n",
    "    '''\n",
    "    def __init__(self,data=None,stopwords=None,tokenizer=None):\n",
    "        '''\n",
    "        输入有三个\n",
    "        data 表示输入的篇章 和sklearn中的feature_extraction保持一致\n",
    "        输入一个list list中为string句子 词之间用空格分开\n",
    "        stopwords 为一个list 一个元素代表一个停用词\n",
    "        tokenizer表示分词器 默认用空格进行分词 可以传入一个函数 之后会用于进行分词\n",
    "        '''\n",
    "        self.data = data\n",
    "        self.stopwords = stopwords\n",
    "        self.tokenizer = tokenizer\n",
    "        \n",
    "        self.wordsdict = {}\n",
    "        \n",
    "        # 词表相关\n",
    "        self.vocab = []\n",
    "        self.vocab_size = 0\n",
    "        self.word2index = {}\n",
    "    \n",
    "    def set_vocab(self,vocab=None,wordsdict=None,ratio=1.0,max_num=None):\n",
    "        '''\n",
    "        统一的词表计算接口\n",
    "        分为导入词表和计算词表两种\n",
    "        当输入wordsdict不为None时 根据wordsdict中的词和词频对应关系进行计算\n",
    "        取整体词前ratio百分比的词 按照降序作为词表\n",
    "        当 wordsdict 为 None 而vocab不为None时 直接进行指定\n",
    "        '''\n",
    "        if None == wordsdict:\n",
    "            if not None == vocab:\n",
    "                self.vocab = vocab\n",
    "        elif not None == wordsdict:\n",
    "            # 先对词进行排序\n",
    "            self.vocab = sorted(self.wordsdict,key=lambda x:self.wordsdict[x],reverse=True)\n",
    "            num_words = 0\n",
    "            if not None == max_num:\n",
    "                # 当设定了取多少词 就不用再进行ratio的比较计算了\n",
    "                num_words = max_num\n",
    "            else:\n",
    "                for num,i in enumerate(range(32),start=1):\n",
    "                    num_words = sum([0 if wordsdict[i]<num*num else 1 for i in wordsdict.keys()])\n",
    "                    ratio_i = float(num_words) /len(wordsdict.keys())\n",
    "                    if ratio_i < ratio:\n",
    "                        break\n",
    "            \n",
    "            self.vocab = self.vocab[:num_words]\n",
    "            self.vocab_size = num_words\n",
    "        \n",
    "        # 上述计算中得到的vocab是有序的 因此下面进行的编码过程也是有序的\n",
    "        for index,w in enumerate(self.vocab):\n",
    "            self.word2index[w] = index\n",
    "        \n",
    "    def fit(self, data=None, ratio=1.0, max_num=None):\n",
    "        '''\n",
    "        构建词典\n",
    "        之后根据stopwords进行修正\n",
    "        之后按照词频排序\n",
    "        之后输出编码词典\n",
    "        '''\n",
    "        if not None == data:\n",
    "            self.data = data\n",
    "        if None == self.data:\n",
    "            print('No data')\n",
    "            return {}\n",
    "        self.__check()\n",
    "        for line in self.data:\n",
    "            words = self.tokenizer(line)\n",
    "            if len(words) == 0:\n",
    "                continue\n",
    "            for word in words:\n",
    "                if word in self.stopwords:\n",
    "                    continue\n",
    "                if not word in self.wordsdict.keys():\n",
    "                    self.wordsdict[word] = 1\n",
    "                else:\n",
    "                    self.wordsdict[word] += 1\n",
    "        self.set_vocab(wordsdict=self.wordsdict,ratio=1.0, max_num=max_num)\n",
    "        \n",
    "        \n",
    "    def transform(self,data,padding=False):\n",
    "        '''\n",
    "        输入一个篇章 使用tokenizer进行分词之后\n",
    "        按照已经fit得到的编号字典对篇章进行编号\n",
    "        输出一个词编号的矩阵 格式为numpy.array\n",
    "        每一行表示一个篇章中的句子的编码形式\n",
    "        当padding为True时 输出需要保持所有的行长度一致\n",
    "        此处需要注意 当transform输入的词存在集外词的时候 默认将其编号为0\n",
    "        '''\n",
    "        if None == data:\n",
    "            print('No data')\n",
    "            return np.array([])\n",
    "        if not type(data) == list:\n",
    "            data = [data]\n",
    "        self.__check()\n",
    "        numMatrix = []\n",
    "        maxLen = 0\n",
    "        \n",
    "        for line in data:\n",
    "            words = self.tokenizer(line)\n",
    "            maxLen = len(words) if maxLen < len(words) else maxLen\n",
    "            # 此处修改一下 使用vocab进行编码 这样的话就可以使用自定义的词汇表了\n",
    "            numMatrix.append([0 if i not in self.word2index.keys() else self.word2index[i] for i in words])\n",
    "        \n",
    "        if True == padding:\n",
    "            for num,_ in enumerate(numMatrix):\n",
    "                numMatrix[num] += (maxLen - len(numMatrix[num]))*[0]\n",
    "        self.sentence_length = maxLen\n",
    "        return np.array(numMatrix)\n",
    "    \n",
    "    def __check(self):\n",
    "        '''\n",
    "        用于检查目前将要执行的操作是否条件完备\n",
    "        一些程序运行共有条件的check机制\n",
    "        主要是类中的tokenizer是否正确\n",
    "        '''\n",
    "        if None == self.tokenizer:\n",
    "            self.tokenizer = lambda x:x.split(' ')\n",
    "        else:\n",
    "            # cut一次 对部分分词器进行初始化 之后在分词时就不用了\n",
    "            self.tokenizer('我爱北京天安门，天安门上太阳升')\n",
    "        if None == self.stopwords:\n",
    "            self.stopwords = []"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 语料准备"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "content = [line.strip() for line in open('./data/DoubanZH.txt','r',encoding='utf-8').readlines()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "label = []\n",
    "sentence = []\n",
    "for line in content:\n",
    "    l,s = line.split(',')\n",
    "    label.append(0 if l=='10' else 1)\n",
    "    sentence.append(s)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "df = pd.DataFrame(data=[i for i in zip(sentence,label)],columns=['sentence','label'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "document = [i for i in df['sentence']]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 词编号 为Rnn结构进行准备"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Word-numbering helper with the default tokenizer (whitespace split).\n",
     "wnn = WordsNameNumber()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
     "# Count word frequencies over the corpus and derive the vocabulary.\n",
     "wnn.fit(document)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
     "# Encode every sentence as a row of word ids, zero-padded to the longest\n",
     "# sentence in the corpus.\n",
     "numbering = wnn.transform(document,padding=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
     "# Sanity check: (num_sentences, max_sentence_length).\n",
     "numbering.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 这部分数据用在少量的样本上 进行预测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Keep only the first 20k samples for this small-scale pre-test.\n",
     "numbering = numbering[:20000]\n",
     "label = label[:20000]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Split points: first 80% train, next 10% test, last 10% dev.\n",
     "border = int(numbering.shape[0] * 0.8)\n",
     "border_dev = int(numbering.shape[0] * 0.9)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "newLabel = np.array([[1 if i == 0 else 0,0 if i== 0 else 1] for i in label])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Slice encoded sentences and one-hot labels into train/test/dev splits\n",
     "# (80/10/10 by position; the data keeps its original file order).\n",
     "train_x = np.array(numbering[:border])\n",
     "test_x = np.array(numbering[border:border_dev])\n",
     "dev_x = np.array(numbering[border_dev:])\n",
     "\n",
     "train_y = np.array(newLabel[:border])\n",
     "test_y = np.array(newLabel[border:border_dev])\n",
     "dev_y = np.array(newLabel[border_dev:])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
     "# Sanity check: dev-split shape (rows, n_classes).\n",
     "dev_y.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 开始构建RNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class model_text_lstm:\n",
    "    def __init__(self,sentence_length,output_classes,vocab_size,embedding_size,hidden_units=[256,128,64]):\n",
    "        with tf.name_scope('input_group'):\n",
    "            self.x = tf.placeholder(dtype=tf.int32,shape=[None,sentence_length],name='input_x')\n",
    "            self.y = tf.placeholder(dtype=tf.int32,shape=[None,output_classes],name='output_y')\n",
    "            self.dropout_keep_prob = tf.placeholder(dtype=tf.float32,name='dropout_keep_prob')\n",
    "            self.l2_lambda = tf.placeholder(dtype=tf.float32,name='l2_lambda')\n",
    "        with tf.name_scope('Embedding'):\n",
    "            W = tf.Variable(tf.random_uniform([vocab_size,embedding_size],-1.0,1.0),name='Embedding_W')\n",
    "            embedding_chars = tf.nn.embedding_lookup(W,self.x,name='Embedding_lookup')\n",
    "        with tf.name_scope('Multi_lstm'):\n",
    "            lstm_cells = []\n",
    "            for num_unit in hidden_units:\n",
    "                lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=num_unit)\n",
    "                lstm_with_droupout = tf.contrib.rnn.DropoutWrapper(lstm_cell,output_keep_prob=self.dropout_keep_prob)\n",
    "                lstm_cells.append(lstm_with_droupout)\n",
    "            # 设置多层的lstm cell\n",
    "            multi_rnn = tf.contrib.rnn.MultiRNNCell(lstm_cells)\n",
    "            # 之后传入dyncmic_rnn中 接上输入 做一个三层的stack lstm\n",
    "            rnn_out,state_tuple = tf.nn.dynamic_rnn(cell=multi_rnn,\n",
    "                                        inputs=embedding_chars,\n",
    "                                        time_major=False,\n",
    "                                        dtype=tf.float32)\n",
    "            rnn_out = tf.slice(rnn_out,[0,sentence_length -1,0],[-1,1,-1])\n",
    "            rnn_out = tf.squeeze(rnn_out,axis=1)\n",
    "        with tf.name_scope('Dense'):\n",
    "            dense_out = tf.contrib.layers.fully_connected(rnn_out,num_outputs=32,activation_fn=tf.sigmoid)\n",
    "        with tf.name_scope('Output'):\n",
    "            W_o = tf.Variable(tf.random_normal([32,2],dtype=tf.float32),name='W_o')\n",
    "            b_o = tf.Variable(tf.random_normal([2],dtype=tf.float32),name='b_o')\n",
    "            out0 = tf.matmul(dense_out,W_o)\n",
    "            out1 = tf.add(out0,b_o)\n",
    "            output = tf.sigmoid(out1)\n",
    "            \n",
    "        with tf.name_scope('Num_correct'):\n",
    "            label_true = tf.argmax(self.y,1)\n",
    "            logits_true = tf.argmax(output,1)\n",
    "            self.num_correct = tf.reduce_sum(tf.cast(tf.equal(label_true,logits_true),tf.float32))\n",
    "        with tf.name_scope('Accuracy'):\n",
    "            label_true = tf.argmax(self.y,1)\n",
    "            logits_true = tf.argmax(output,1)\n",
    "            self.accuracy = tf.reduce_mean(tf.cast(tf.equal(label_true,logits_true),tf.float32))\n",
    "        with tf.name_scope('Loss'):\n",
    "            losses = tf.losses.sigmoid_cross_entropy(self.y,out1)\n",
    "            self.loss = tf.reduce_mean(losses)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练和测试过程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Graph hyper-parameters; sentence_length/vocab_size come from the\n",
     "# fitted WordsNameNumber instance.\n",
     "sentence_length = wnn.sentence_length\n",
     "vocab_size = wnn.vocab_size\n",
     "embedding_size = 100\n",
     "tf.reset_default_graph() "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
     "# NOTE(review): embedding_size=150 is hardcoded here although the cell\n",
     "# above defines embedding_size = 100 -- confirm which value is intended.\n",
     "# This graph is discarded by reset_default_graph() in the training cell,\n",
     "# so this instance effectively only smoke-tests graph construction.\n",
     "rnn = model_text_lstm(sentence_length=sentence_length,\n",
     "                               output_classes=2,\n",
     "                               vocab_size=vocab_size,\n",
     "                               embedding_size=150,)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Per-split batch sizes used by the batchers below.\n",
     "train_batch_size = 512\n",
     "test_batch_size = 128\n",
     "dev_batch_size = 256"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# One batcher per split; the default shuffle_type='strict' draws\n",
     "# disjoint random batches within an epoch.\n",
     "bcher_train = batcher(data_length=len(train_x),batch_size=train_batch_size)\n",
     "bcher_test = batcher(data_length=len(test_x),batch_size=test_batch_size)\n",
     "bcher_dev = batcher(data_length=len(dev_x),batch_size=dev_batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
     "# Training driver: rebuilds the graph, alternates training epochs with\n",
     "# dev evaluation, early-stops after 5 non-improving epochs, then runs a\n",
     "# final pass over the test split.\n",
     "tf.reset_default_graph()\n",
     "with tf.Session() as sex:\n",
     "    rnn_model = model_text_lstm(sentence_length=sentence_length,\n",
     "                                   output_classes=2,\n",
     "                                   vocab_size=vocab_size,\n",
     "                                   embedding_size=150,)\n",
     "    \n",
     "    optimizer = tf.train.AdagradOptimizer(0.1)\n",
     "    train_target = optimizer.minimize(rnn_model.loss)\n",
     "    \n",
     "    \n",
     "    sex.run(tf.global_variables_initializer())\n",
     "    # One optimisation step; returns (accuracy, loss, num_correct).\n",
     "    # NOTE(review): dropout_keep_prob is fed as 1.0 during training, which\n",
     "    # disables the DropoutWrapper entirely -- confirm this is intended.\n",
     "    def train_step(batch_t_x,batch_t_y):\n",
     "        f_dict={\n",
     "            rnn_model.x:batch_t_x,\n",
     "            rnn_model.y:batch_t_y,\n",
     "            rnn_model.dropout_keep_prob:1.0\n",
     "        }\n",
     "        _,acc,loss,cnum = sex.run([train_target,rnn_model.accuracy,rnn_model.loss,rnn_model.num_correct],feed_dict=f_dict)\n",
     "        return acc,loss,cnum\n",
     "    \n",
     "    # Evaluation-only step (no optimiser run).\n",
     "    def test_step(batch_d_x,batch_d_y):\n",
     "        f_dict={\n",
     "            rnn_model.x:batch_d_x,\n",
     "            rnn_model.y:batch_d_y,\n",
     "            rnn_model.dropout_keep_prob:1.0\n",
     "        }\n",
     "        acc,loss,c_num = sex.run([rnn_model.accuracy,rnn_model.loss,rnn_model.num_correct],feed_dict=f_dict)\n",
     "        return acc,loss,c_num\n",
     "    max_acc = - np.inf\n",
     "    early_stop = 0\n",
     "    \n",
     "    for i in range(1000):\n",
     "        # One full epoch: train over all batches, then evaluate on dev.\n",
     "        print('Step:{0}'.format(i))\n",
     "        #--------------------------------Train-----------------------------------------\n",
     "        # NOTE(review): re-assigns the module-level train_batch_size to the\n",
     "        # same value; redundant but harmless.\n",
     "        train_batch_size = 512\n",
     "        for p in range(0,int(len(train_y)/train_batch_size)):\n",
     "            btrx,btry = bcher_train.get_batch(train_x,train_y)\n",
     "            tracc,trloss,cnum = train_step(btrx,btry)\n",
     "            print('Train acc:{0:12} loss:{1:12} cnum:{2}'.format(tracc,trloss,cnum))\n",
     "        #--------------------------------Dev test---------------------------------------\n",
     "        total_dev_acc = 0\n",
     "        for step_num,q in enumerate(range(0,int(len(dev_y)/dev_batch_size))):\n",
     "            bdx,bdy = bcher_dev.get_batch(dev_x,dev_y)\n",
     "            dacc,dloss,dcnum = test_step(bdx,bdy)\n",
     "            print('Dev acc:{0:12} loss{1:12}'.format(dacc,dloss))\n",
     "            total_dev_acc += dacc\n",
     "        flag_acc = total_dev_acc/(step_num+1)\n",
     "        print('Total Dev acc:{0:12}'.format(flag_acc))\n",
     "        \n",
     "        #----------------------------------Early stop------------------------------------\n",
     "        # Stop after 5 consecutive epochs without a dev-accuracy improvement.\n",
     "        if max_acc > flag_acc:\n",
     "            early_stop += 1\n",
     "        else:\n",
     "            max_acc = flag_acc\n",
     "            early_stop = 0\n",
     "        if early_stop > 5:\n",
     "            break\n",
     "    print('Best acc:{0:12}'.format(flag_acc))\n",
     "    \n",
     "    #---------------------------------------Test----------------------------------------\n",
     "    total_test_acc = 0\n",
     "    for step_num,r in enumerate(range(0,int(len(test_y)/test_batch_size))):\n",
     "        btex,btey = bcher_test.get_batch(test_x,test_y)\n",
     "        teacc,teloss,tecnum = test_step(btex,btey)\n",
     "        print('Test acc:{0:12} loss:{1:12}'.format(teacc,teloss))\n",
     "        total_test_acc += teacc\n",
     "    max_acc = total_test_acc/(step_num+1)\n",
     "    print('Test final acc:{0:12}'.format(max_acc))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 总结\n",
    "到了都没调明白，RNN的loss一直都是最多稳定在0.69左右不再下降。不管结构如何调整，最后都是这样的一个结果，所以有点头大。\n",
    "先写点别的结构试试看吧，针对这个RNN之后再进行调整。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
