{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/lawbda/env/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import pathlib\n",
    "import pickle\n",
    "import os "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "gpu_id = '0'\n",
    "\n",
    "def init_env():\n",
    "    '''Restrict TensorFlow to the GPU selected by `gpu_id`.'''\n",
    "    # Make CUDA enumerate devices in PCI bus order before any context exists.\n",
    "    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n",
    "    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id\n",
    "\n",
    "init_env()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import KFold\n",
    "\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "\n",
    "from sklearn.metrics import accuracy_score"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义一个取样本batch的对象"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class batcher:\n",
    "    '''\n",
    "    Random mini-batch sampler: draws `batch_size` distinct rows\n",
    "    from a (data, label) pair on each call.\n",
    "    '''\n",
    "    def __init__(self,seed=17):\n",
    "        '''\n",
    "        Seed a private RandomState so sampling is reproducible\n",
    "        without clobbering the global numpy RNG (np.random.seed).\n",
    "        '''\n",
    "        self._rng = np.random.RandomState(seed)\n",
    "        \n",
    "    def __randint(self,maxlen,batch_size):\n",
    "        '''\n",
    "        Return `batch_size` distinct indices in [0, maxlen).\n",
    "        '''\n",
    "        # choice(..., replace=False) replaces the original rejection-sampling\n",
    "        # loop, which was O(n*k) and looped forever when batch_size > maxlen.\n",
    "        size = min(batch_size, maxlen)\n",
    "        return self._rng.choice(maxlen, size=size, replace=False).tolist()\n",
    "    \n",
    "    def get_batch(self,data,label,batch_size):\n",
    "        '''\n",
    "        Return a (features, labels) pair of `batch_size` random rows.\n",
    "        `data` must support fancy indexing (e.g. a numpy array).\n",
    "        '''\n",
    "        mark = np.array(label)\n",
    "        randint = self.__randint(len(label),batch_size)\n",
    "        return data[randint],mark[randint]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 构建一个给词编码的对象"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class WordsNameNumber:\n",
    "    '''\n",
    "    Builds a word-frequency dictionary from a corpus, ranks the words by\n",
    "    descending frequency and assigns each kept word an integer code.\n",
    "    Codes start at 1 so that 0 is reserved for out-of-vocabulary and\n",
    "    padding tokens (the downstream embedding layer relies on this).\n",
    "    An optional stopword list removes words before counting.\n",
    "    '''\n",
    "    def __init__(self,data=None,stopwords=None,tokenizer=None):\n",
    "        '''\n",
    "        data: list of sentence strings, words separated by spaces\n",
    "              (same convention as sklearn feature_extraction).\n",
    "        stopwords: list of words to ignore while counting.\n",
    "        tokenizer: callable that splits a sentence into words;\n",
    "                   defaults to splitting on single spaces.\n",
    "        '''\n",
    "        self.data = data\n",
    "        self.stopwords = stopwords\n",
    "        self.tokenizer = tokenizer\n",
    "        \n",
    "        self.wordsdict = {}\n",
    "        \n",
    "        # vocabulary state\n",
    "        self.vocab = []\n",
    "        self.vocab_size = 0\n",
    "        self.word2index = {}\n",
    "    \n",
    "    def set_vocab(self,vocab=None,wordsdict=None,ratio=1.0,max_num=None):\n",
    "        '''\n",
    "        Unified vocabulary setup.\n",
    "        With `wordsdict`: keep the most frequent words, either exactly\n",
    "        `max_num` of them, or the count found by raising a frequency\n",
    "        threshold until the kept fraction drops below `ratio`.\n",
    "        With only `vocab`: use the given word list as-is.\n",
    "        '''\n",
    "        if wordsdict is None:\n",
    "            if vocab is not None:\n",
    "                self.vocab = vocab\n",
    "                # +1 reserves code 0 for out-of-vocabulary / padding.\n",
    "                self.vocab_size = len(vocab) + 1\n",
    "        else:\n",
    "            # Keep the instance dictionary in sync with the argument\n",
    "            # (the original sorted self.wordsdict but counted wordsdict).\n",
    "            self.wordsdict = wordsdict\n",
    "            self.vocab = sorted(wordsdict,key=lambda w:wordsdict[w],reverse=True)\n",
    "            num_words = len(self.vocab)\n",
    "            if max_num is not None:\n",
    "                # An explicit word count overrides the ratio search.\n",
    "                num_words = max_num\n",
    "            else:\n",
    "                # Raise the frequency cutoff (1, 4, 9, ... = num**2) until\n",
    "                # the kept share of the vocabulary falls below `ratio`.\n",
    "                for num in range(1,33):\n",
    "                    num_words = sum(1 for w in wordsdict if wordsdict[w] >= num*num)\n",
    "                    if float(num_words)/len(wordsdict) < ratio:\n",
    "                        break\n",
    "            \n",
    "            self.vocab = self.vocab[:num_words]\n",
    "            # +1 reserves code 0 for out-of-vocabulary / padding.\n",
    "            self.vocab_size = num_words + 1\n",
    "        \n",
    "        # vocab is frequency-ordered, so the codes are too.  Bug fix:\n",
    "        # numbering now starts at 1, because transform() maps OOV words to\n",
    "        # 0 and the old 0-based codes made the most frequent word collide\n",
    "        # with the OOV/padding code.\n",
    "        for index,w in enumerate(self.vocab,start=1):\n",
    "            self.word2index[w] = index\n",
    "        \n",
    "    def fit(self, data=None, ratio=1.0, max_num=None):\n",
    "        '''\n",
    "        Count word frequencies over the corpus (skipping stopwords),\n",
    "        then build the ranked vocabulary and the word -> code map.\n",
    "        '''\n",
    "        if data is not None:\n",
    "            self.data = data\n",
    "        if self.data is None:\n",
    "            print('No data')\n",
    "            return {}\n",
    "        self.__check()\n",
    "        for line in self.data:\n",
    "            for word in self.tokenizer(line):\n",
    "                if word in self.stopwords:\n",
    "                    continue\n",
    "                # accumulate term frequency\n",
    "                self.wordsdict[word] = self.wordsdict.get(word,0) + 1\n",
    "        # Bug fix: forward the caller's `ratio` instead of a hard-coded 1.0.\n",
    "        self.set_vocab(wordsdict=self.wordsdict,ratio=ratio,max_num=max_num)\n",
    "        \n",
    "        \n",
    "    def transform(self,data,padding=False):\n",
    "        '''\n",
    "        Encode sentences into a numpy matrix of word codes using the\n",
    "        fitted word2index map; out-of-vocabulary words map to 0.\n",
    "        With padding=True every row is right-padded with 0 to the\n",
    "        length of the longest sentence in `data`.\n",
    "        '''\n",
    "        if data is None:\n",
    "            print('No data')\n",
    "            return np.array([])\n",
    "        if not isinstance(data,list):\n",
    "            data = [data]\n",
    "        self.__check()\n",
    "        numMatrix = []\n",
    "        maxLen = 0\n",
    "        \n",
    "        for line in data:\n",
    "            words = self.tokenizer(line)\n",
    "            maxLen = max(maxLen,len(words))\n",
    "            # dict.get with default 0 encodes OOV words as the reserved code.\n",
    "            numMatrix.append([self.word2index.get(w,0) for w in words])\n",
    "        \n",
    "        if padding:\n",
    "            for num,row in enumerate(numMatrix):\n",
    "                numMatrix[num] = row + (maxLen - len(row))*[0]\n",
    "        self.sentence_length = maxLen\n",
    "        return np.array(numMatrix)\n",
    "    \n",
    "    def __check(self):\n",
    "        '''\n",
    "        Make sure tokenizer and stopwords are usable before processing.\n",
    "        '''\n",
    "        if self.tokenizer is None:\n",
    "            self.tokenizer = lambda x:x.split(' ')\n",
    "        else:\n",
    "            # Warm-up call: some tokenizers initialise lazily on first use,\n",
    "            # so trigger that once here instead of during the real pass.\n",
    "            self.tokenizer('我爱北京天安门，天安门上太阳升')\n",
    "        if self.stopwords is None:\n",
    "            self.stopwords = []"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 语料准备"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Each line of the corpus is '<rating>,<review text>'.\n",
    "# Bug fix: use a context manager so the file handle is closed promptly.\n",
    "with open('./data/DoubanZH.txt','r',encoding='utf-8') as f:\n",
    "    content = [line.strip() for line in f]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "label = []\n",
    "sentence = []\n",
    "for line in content:\n",
    "    # Bug fix: split on the first comma only, so commas inside the\n",
    "    # review text no longer raise 'too many values to unpack'.\n",
    "    l,s = line.split(',',1)\n",
    "    # Rating '10' becomes class 0; every other rating becomes class 1.\n",
    "    label.append(0 if l=='10' else 1)\n",
    "    sentence.append(s)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One review per row: a 'sentence' column and its binary 'label'.\n",
    "df = pd.DataFrame(data=list(zip(sentence,label)),columns=['sentence','label'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Materialise the sentence column as a plain Python list for the encoder.\n",
    "document = df['sentence'].tolist()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 词编号 为Rnn结构进行准备"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Word encoder: maps tokens to frequency-ranked integer codes.\n",
    "wnn = WordsNameNumber()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Build the frequency dictionary and vocabulary from the corpus.\n",
    "wnn.fit(document)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Encode every review as a fixed-length row of word codes (0-padded).\n",
    "numbering = wnn.transform(document,padding=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(274012, 192)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: (num_reviews, max_sentence_length).\n",
    "numbering.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 这部分数据用在少量的样本上 进行预测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Work on a 20k-row subset to keep this trial run fast.\n",
    "numbering = numbering[:20000]\n",
    "label = label[:20000]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Split boundaries: first 80% train, next 10% test, last 10% dev.\n",
    "border = int(numbering.shape[0] * 0.8)\n",
    "border_dev = int(numbering.shape[0] * 0.9)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One-hot encode the labels: class 0 -> [1, 0], anything else -> [0, 1].\n",
    "newLabel = np.array([([1, 0] if i == 0 else [0, 1]) for i in label])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Contiguous 80/10/10 split (rows are not shuffled).\n",
    "train_x, train_y = numbering[:border], newLabel[:border]\n",
    "test_x, test_y = numbering[border:border_dev], newLabel[border:border_dev]\n",
    "dev_x, dev_y = numbering[border_dev:], newLabel[border_dev:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(2000, 2)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: dev split size and number of classes.\n",
    "dev_y.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 开始构建RNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One independent batch sampler per data split.\n",
    "bcher_train = batcher()\n",
    "bcher_test = batcher()\n",
    "bcher_dev = batcher()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [],
   "source": [
    "class model_text_rnn:\n",
    "    def __init__(self,sentence_length,output_classes,vocab_size,embedding_size,num_units):\n",
    "        '''\n",
    "        Despite the name, the active forward path below is a small CNN:\n",
    "        the RNN branch is entirely commented out.\n",
    "        The input is a batch of word-id sentences of shape\n",
    "        (batch, sentence_length).  Each id is embedded into an\n",
    "        embedding_size vector, a single 3-word-wide convolution plus\n",
    "        max-pool runs over the embedded sentence, and two dense layers\n",
    "        map the pooled features to class scores.\n",
    "        `num_units` is only referenced by the disabled RNN branch.\n",
    "        '''\n",
    "        \n",
    "        self.x = tf.placeholder(dtype = tf.int32, shape=[None, sentence_length],name='input_x')\n",
    "        self.y = tf.placeholder(dtype = tf.int32, shape=[None, output_classes], name='input_y')\n",
    "        self.dropout_keep_prob = tf.placeholder(dtype= tf.float32, name='dropout_keep_prob')\n",
    "        \n",
    "        with tf.name_scope('Embedding'):\n",
    "            W = tf.Variable(tf.random_uniform([vocab_size,embedding_size],-1.0,1.0),name='W')\n",
    "            # Look up the embedding vector of every word id in the batch.\n",
    "            self.embedded_chars = tf.nn.embedding_lookup(W,self.x)\n",
    "            \n",
    "            \n",
    "            \n",
    "            # Append a channel axis so conv2d can consume the tensor.\n",
    "            only_conv = tf.expand_dims(self.embedded_chars,axis=-1)\n",
    "        with tf.name_scope('Real_CNN_layer'):\n",
    "            # 3-word-wide filter spanning the full embedding dimension.\n",
    "            filter_cell = tf.Variable(tf.truncated_normal([3,embedding_size,1,1], stddev=0.1))\n",
    "            conv_layer = tf.nn.conv2d(only_conv,filter=filter_cell,strides=[1,1,1,1],padding='VALID')\n",
    "            maxpool = tf.nn.max_pool(conv_layer,ksize=[1,1,1,1],strides=[1,1,1,1],padding='VALID')\n",
    "            # Drop the two singleton axes -> (batch, sentence_length - 2).\n",
    "            maxpool = tf.squeeze(maxpool,axis=-1)\n",
    "            maxpool = tf.squeeze(maxpool,axis=-1)\n",
    "        \n",
    "        \n",
    "        \n",
    "        \n",
    "#         with tf.name_scope('Main_rnn'):\n",
    "#             # Set up an RNN cell\n",
    "# #             rnn_cell = tf.contrib.rnn.BasicRNNCell(num_units=num_units,activation=tf.nn.relu)\n",
    "            \n",
    "#             rnn_cell = tf.contrib.rnn.BasicLSTMCell(num_units=num_units,activation=tf.nn.relu)\n",
    "#             # initial_state = rnn_cell.zero_state(sentence_length, dtype=tf.float32)\n",
    "# #             print('init state shape',initial_state.get_shape())\n",
    "#             rnn_with_dropout = tf.contrib.rnn.DropoutWrapper(rnn_cell,output_keep_prob=self.dropout_keep_prob)\n",
    "#             out, state = tf.nn.dynamic_rnn(cell=rnn_with_dropout,\n",
    "#                                            inputs=self.embedded_chars,\n",
    "#                                            time_major=False,\n",
    "#                                            dtype=tf.float32)\n",
    "        \n",
    "#         # Open question: labels exist per sentence, but the RNN consumes\n",
    "#         # words in sentence-length batches, so the granularity changes --\n",
    "#         # how should labels be matched back to sentences?\n",
    "        \n",
    "#         # Use a CNN to post-process the embedding/RNN output\n",
    "        \n",
    "#         with tf.name_scope('Cnn_reduce_layer'):\n",
    "            \n",
    "#             # For a 2D conv the filter dims are filter_height, filter_width, in_channels, out_channels\n",
    "#             filter_m = tf.Variable(tf.truncated_normal([3,num_units,1,1], stddev=0.1))\n",
    "            \n",
    "#             # Add an axis to form the four dims batch, height, width, channel\n",
    "#             out = tf.expand_dims(out,axis=-1)\n",
    "            \n",
    "#             conv2Reduce = tf.nn.conv2d(input=out,filter=filter_m, strides=[1,1,1,1], padding='VALID',)\n",
    "#             conv2MaxPooling = tf.nn.max_pool(conv2Reduce,ksize=[1,1,1,1],strides=[1,1,1,1],padding='VALID')\n",
    "#             conv2MaxPooling = tf.squeeze(conv2MaxPooling,axis=-1)\n",
    "#             conv2MaxPooling = tf.squeeze(conv2MaxPooling,axis=-1)\n",
    "            \n",
    "            \n",
    "        with tf.name_scope('Dense'):\n",
    "            # NOTE(review): random_uniform defaults to [0, 1); a symmetric\n",
    "            # or variance-scaled init is usually preferred for dense layers.\n",
    "            W = tf.Variable(tf.random_uniform([sentence_length - 3 + 1,32],dtype=tf.float32))\n",
    "            b = tf.Variable(tf.random_uniform([32],dtype=tf.float32))\n",
    "            d_out = tf.add(tf.matmul(maxpool,W),b)\n",
    "            d_out = tf.sigmoid(d_out)\n",
    "            \n",
    "        with tf.name_scope('Output'):\n",
    "            # NOTE(review): 32 -> 2 hard-codes two classes here; the\n",
    "            # `output_classes` constructor argument is ignored.\n",
    "            W_o = tf.Variable(tf.random_uniform([32,2],dtype=tf.float32))\n",
    "            b_o = tf.Variable(tf.random_uniform([2],dtype=tf.float32))\n",
    "\n",
    "            d_out = tf.add(tf.matmul(d_out,W_o),b_o)\n",
    "            self.d_out = tf.sigmoid(d_out)\n",
    "        \n",
    "        with tf.name_scope('loss'):\n",
    "            # `d_out` here is the pre-sigmoid output of the Output layer,\n",
    "            # i.e. the logits the sigmoid cross-entropy expects.\n",
    "            losses = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.y,logits=d_out)\n",
    "            self.loss = tf.reduce_mean(losses)\n",
    "\n",
    "        with tf.name_scope('accuracy'):\n",
    "            # Fraction of rows whose argmax matches the one-hot label.\n",
    "            prediction = tf.argmax(self.d_out, 1)\n",
    "            label = tf.argmax(self.y,1)\n",
    "            self.accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, label),tf.float32))\n",
    "            \n",
    "        with tf.name_scope('num_correct'):\n",
    "            # Raw count of correct predictions in the batch.\n",
    "            prediction = tf.argmax(self.d_out, 1)\n",
    "            label = tf.argmax(self.y,1)\n",
    "            self.num_correct = tf.reduce_sum(tf.cast(tf.equal(prediction,label),tf.float32))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练和测试过程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Model hyper-parameters taken from the fitted encoder.\n",
    "sentence_length = wnn.sentence_length\n",
    "vocab_size = wnn.vocab_size\n",
    "# NOTE(review): the training cell below passes embedding_size=150,\n",
    "# overriding this value.\n",
    "embedding_size = 100\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step:0\n",
      "Train acc:  0.52734375 loss:0.7037423253059387 cnum:270.0\n",
      "Train acc: 0.509765625 loss:0.6998043060302734 cnum:261.0\n",
      "Train acc:   0.4765625 loss:0.7001437544822693 cnum:244.0\n",
      "Train acc: 0.541015625 loss:0.6920114159584045 cnum:277.0\n",
      "Train acc:  0.51953125 loss:0.692615270614624 cnum:266.0\n",
      "Train acc:  0.48046875 loss:0.6964969635009766 cnum:246.0\n",
      "Train acc: 0.517578125 loss:0.692829966545105 cnum:265.0\n",
      "Train acc: 0.572265625 loss:0.6903554797172546 cnum:293.0\n",
      "Train acc: 0.521484375 loss:0.6925531029701233 cnum:267.0\n",
      "Train acc:   0.4765625 loss:0.6976292133331299 cnum:244.0\n",
      "Train acc:  0.47265625 loss:0.6943084001541138 cnum:242.0\n",
      "Train acc: 0.513671875 loss:0.6929281949996948 cnum:263.0\n",
      "Train acc:  0.49609375 loss:0.6934125423431396 cnum:254.0\n",
      "Train acc: 0.498046875 loss:0.6932007670402527 cnum:255.0\n",
      "Train acc:    0.515625 loss:0.6930599808692932 cnum:264.0\n",
      "Train acc:    0.515625 loss:0.6928111910820007 cnum:264.0\n",
      "Train acc: 0.509765625 loss:0.6929567456245422 cnum:261.0\n",
      "Train acc:  0.51171875 loss:0.6928777694702148 cnum:262.0\n",
      "Train acc: 0.501953125 loss:0.6932921409606934 cnum:257.0\n",
      "Train acc: 0.494140625 loss:0.6934269666671753 cnum:253.0\n",
      "Train acc: 0.474609375 loss:0.6933891177177429 cnum:243.0\n",
      "Train acc:     0.46875 loss:0.6943254470825195 cnum:240.0\n",
      "Train acc:  0.44921875 loss:0.6946235299110413 cnum:230.0\n",
      "Train acc: 0.533203125 loss:0.6915955543518066 cnum:273.0\n",
      "Train acc:   0.4921875 loss:0.6948034763336182 cnum:252.0\n",
      "Train acc:   0.4609375 loss:0.6950419545173645 cnum:236.0\n",
      "Train acc:  0.48828125 loss:0.6936653256416321 cnum:250.0\n",
      "Train acc:         0.5 loss:0.6931478381156921 cnum:256.0\n",
      "Train acc: 0.498046875 loss:0.6931504011154175 cnum:255.0\n",
      "Train acc:   0.5390625 loss:0.6930704116821289 cnum:276.0\n",
      "Train acc: 0.505859375 loss:0.6932488083839417 cnum:259.0\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "too many values to unpack (expected 3)",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-65-8aa7c2cc7914>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     49\u001b[0m         \u001b[0;32mfor\u001b[0m \u001b[0mstep_num\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mq\u001b[0m \u001b[0;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdev_y\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0mdev_batch_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     50\u001b[0m             \u001b[0mbdx\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mbdy\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbcher_dev\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdev_x\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdev_y\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdev_batch_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 51\u001b[0;31m             \u001b[0mdacc\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdloss\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdcnum\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtest_step\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbdx\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mbdy\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     52\u001b[0m             \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Dev acc:{0:12} loss{1:12}'\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdacc\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     53\u001b[0m             \u001b[0mtotal_dev_acc\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mdacc\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-65-8aa7c2cc7914>\u001b[0m in \u001b[0;36mtest_step\u001b[0;34m(batch_d_x, batch_d_y)\u001b[0m\n\u001b[1;32m     28\u001b[0m             \u001b[0mrnn_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdropout_keep_prob\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;36m0.5\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     29\u001b[0m         }\n\u001b[0;32m---> 30\u001b[0;31m         \u001b[0macc\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mc_num\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msex\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mrnn_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maccuracy\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mrnn_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mrnn_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_correct\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mrnn_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_correct\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mf_dict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     31\u001b[0m         \u001b[0;32mreturn\u001b[0m \u001b[0macc\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mc_num\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     32\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mValueError\u001b[0m: too many values to unpack (expected 3)"
     ]
    }
   ],
   "source": [
    "tf.reset_default_graph()\n",
    "with tf.Session() as sess:\n",
    "    writer = tf.summary.FileWriter('./tfb_file/rnn/1')\n",
    "    rnn_model = model_text_rnn(sentence_length=sentence_length,\n",
    "                               output_classes=2,\n",
    "                               vocab_size=vocab_size,\n",
    "                               embedding_size=150,\n",
    "                               num_units=256)\n",
    "    \n",
    "    # NOTE(review): a learning rate of 1 is unusually high for Adagrad.\n",
    "    optimizer = tf.train.AdagradOptimizer(1)\n",
    "    train_target = optimizer.minimize(rnn_model.loss)\n",
    "    \n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    \n",
    "    def train_step(batch_t_x,batch_t_y):\n",
    "        '''Run one optimisation step on a training batch.'''\n",
    "        f_dict={\n",
    "            rnn_model.x:batch_t_x,\n",
    "            rnn_model.y:batch_t_y,\n",
    "            rnn_model.dropout_keep_prob:0.5\n",
    "        }\n",
    "        _,acc,loss,cnum = sess.run([train_target,rnn_model.accuracy,rnn_model.loss,rnn_model.num_correct],feed_dict=f_dict)\n",
    "        return acc,loss,cnum\n",
    "    \n",
    "    def test_step(batch_d_x,batch_d_y):\n",
    "        '''Evaluate one batch; no gradient update.'''\n",
    "        f_dict={\n",
    "            rnn_model.x:batch_d_x,\n",
    "            rnn_model.y:batch_d_y,\n",
    "            # Bug fix: dropout must be disabled (keep prob 1.0) at eval time;\n",
    "            # 0.5 here made dev/test accuracy noisy and pessimistic.\n",
    "            rnn_model.dropout_keep_prob:1.0\n",
    "        }\n",
    "        acc,loss,c_num = sess.run([rnn_model.accuracy,rnn_model.loss,rnn_model.num_correct],feed_dict=f_dict)\n",
    "        return acc,loss,c_num\n",
    "    \n",
    "    max_acc = - np.inf\n",
    "    early_stop = 0\n",
    "    \n",
    "    writer.add_graph(sess.graph)\n",
    "    for i in range(1000):\n",
    "        print('Step:{0}'.format(i))\n",
    "        #--------------------------------Train-----------------------------------------\n",
    "        train_batch_size = 512\n",
    "        for p in range(0,int(len(train_y)/train_batch_size)):\n",
    "            btrx,btry = bcher_train.get_batch(train_x,train_y,train_batch_size)\n",
    "            tracc,trloss,cnum = train_step(btrx,btry)\n",
    "            print('Train acc:{0:12} loss:{1:12} cnum:{2}'.format(tracc,trloss,cnum))\n",
    "        #--------------------------------Dev test---------------------------------------\n",
    "        dev_batch_size = 256\n",
    "        total_dev_acc = 0\n",
    "        for step_num,q in enumerate(range(0,int(len(dev_y)/dev_batch_size))):\n",
    "            bdx,bdy = bcher_dev.get_batch(dev_x,dev_y,dev_batch_size)\n",
    "            dacc,dloss,dcnum = test_step(bdx,bdy)\n",
    "            print('Dev acc:{0:12} loss:{1:12}'.format(dacc,dloss))\n",
    "            total_dev_acc += dacc\n",
    "        flag_acc = total_dev_acc/(step_num+1)\n",
    "        print('Total Dev acc:{0:12}'.format(flag_acc))\n",
    "        \n",
    "        #----------------------------------Early stop------------------------------------\n",
    "        # Stop after 5 consecutive epochs without a new best dev accuracy.\n",
    "        if max_acc > flag_acc:\n",
    "            early_stop += 1\n",
    "        else:\n",
    "            max_acc = flag_acc\n",
    "            early_stop = 0\n",
    "        if early_stop > 5:\n",
    "            break\n",
    "    # Bug fix: report the best dev accuracy (max_acc), not the last epoch's.\n",
    "    print('Best acc:{0:12}'.format(max_acc))\n",
    "    \n",
    "    #---------------------------------------Test----------------------------------------\n",
    "    test_batch_size = 128\n",
    "    total_test_acc = 0\n",
    "    for step_num,r in enumerate(range(0,int(len(test_y)/test_batch_size))):\n",
    "        btex,btey = bcher_test.get_batch(test_x,test_y,test_batch_size)\n",
    "        teacc,teloss,tecnum = test_step(btex,btey)\n",
    "        print('Test acc:{0:12} loss:{1:12}'.format(teacc,teloss))\n",
    "        total_test_acc += teacc\n",
    "    # Use a dedicated name so the best dev accuracy is not overwritten.\n",
    "    final_test_acc = total_test_acc/(step_num+1)\n",
    "    print('Test final acc:{0:12}'.format(final_test_acc))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 总结\n",
    "到了都没调明白，RNN的loss一直都是最多稳定在0.69左右不再下降。不管结构如何调整，最后都是这样的一个结果，所以有点头大。\n",
    "先写点别的结构试试看吧，针对这个RNN之后再进行调整。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# graph = tf.Graph()\n",
    "# with graph.as_default():\n",
    "#     session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)\n",
    "#     sess = tf.Session(config=session_conf)\n",
    "    \n",
    "#     # 在这里开启session\n",
    "#     with sess.as_default():\n",
    "        \n",
    "#         writer = tf.summary.FileWriter('./tfb_file/rnn/1')\n",
    "        \n",
    "#         # rnn 模型\n",
    "#         rnn = model_text_rnn(sentence_length=sentence_length, \n",
    "#                        output_classes=2,\n",
    "#                        vocab_size=vocab_size,\n",
    "#                        embedding_size=embedding_size,\n",
    "#                        num_units=128,\n",
    "#                       )\n",
    "        \n",
    "#         optimizer = tf.train.GradientDescentOptimizer(0.1)\n",
    "#         train_op = optimizer.minimize(rnn.loss)\n",
    "        \n",
    "#         # compute_gradients 和 apply_gradients 为minimize函数的两个部分\n",
    "#         # 实际上就是将minimize函数拆分了\n",
    "#         # compute_gradients 返回一个以元组(gradient, variable)组成的列表\n",
    "# #         grads_and_vars = optimizer.compute_gradients(rnn.loss)\n",
    "        \n",
    "#         # apply_gradients 将计算出的梯度应用到变量上 \n",
    "#         # 是函数minimize()的第二部分，返回一个应用指定的梯度的操作Operation 对global_step做自增操作\n",
    "# #         train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n",
    "     \n",
    "#         # 定义一个函数 针对每一次训练 每次调用时 进行一个batch的训练\n",
    "#         def train_step(batch_x,batch_y):\n",
    "#             feed_dict = {\n",
    "#                 rnn.x:batch_x,\n",
    "#                 rnn.y:batch_y,\n",
    "#                 rnn.dropout_keep_prob:1.0\n",
    "#             }\n",
    "#             _,loss,acc = sess.run([train_op, rnn.loss, rnn.accuracy],feed_dict)\n",
    "#             return acc\n",
    "#         # 定义一个函数 针对每一次测试和验证 进行一个batch的测试或者验证\n",
    "#         def dev_step(dev_x,dev_y):\n",
    "#             feed_dict = {\n",
    "#                 rnn.x:dev_x,\n",
    "#                 rnn.y:dev_y,\n",
    "#                 rnn.dropout_keep_prob:1.0\n",
    "#             }\n",
    "#             loss,acc,num_correct = sess.run([rnn.loss,rnn.accuracy,rnn.num_correct],feed_dict)\n",
    "#             return num_correct\n",
    "        \n",
    "#         max_acc = - np.inf\n",
    "#         early_stop = 0\n",
    "        \n",
    "#         writer.add_graph(sess.graph)\n",
    "#         sess.run(tf.global_variables_initializer())\n",
    "#         for i in range(1000):\n",
    "#             print('Epoch:{0}'.format(i))\n",
    "#             # -------------------------训练过程-----------------------------\n",
    "#             for p in range(0,int(len(train_y)/512)):\n",
    "#                 batch_train_x,batch_train_y = bcher_train.get_batch(train_x,train_y,512)\n",
    "#                 t_acc = train_step(batch_train_x,batch_train_y)\n",
    "            \n",
    "#             #--------------------------开发集测试过程---------------------\n",
    "#             total_dev_correct = 0\n",
    "#             for q in range(0,int(len(dev_y)/128)):\n",
    "#                 batch_dev_x,batch_dev_y = bcher_dev.get_batch(dev_x,dev_y,128)\n",
    "#                 num_dev_correct = dev_step(batch_dev_x,batch_dev_y)\n",
    "#                 total_dev_correct += num_dev_correct\n",
    "#             dev_acc = float(total_dev_correct) / len(dev_y)\n",
    "#             print('Dev total acc:{0}'.format(dev_acc))\n",
    "            \n",
    "            \n",
    "#             # ------------------------Early stop 检测--------------------------\n",
    "#             if max_acc > dev_acc:\n",
    "#                 early_stop += 1\n",
    "#             else:\n",
    "#                 max_acc = dev_acc\n",
    "#                 early_stop = 0\n",
    "            \n",
    "#             if early_stop > 5:\n",
    "#                 break\n",
    "#         # ------------------------------测试集测试过程------------------------------\n",
    "#         print('Best acc:{0}'.format(max_acc))\n",
    "#         total_test_correct = 0\n",
    "#         for r in range(0,int(len(test_y)/128)):\n",
    "#             batch_test_x,batch_test_y = bcher_test.get_batch(test_x,test_y,128)\n",
    "#             num_test_correct = dev_step(batch_test_x,batch_test_y)\n",
    "#             total_test_correct += num_test_correct\n",
    "#         test_acc = float(total_test_correct) / len(test_y)\n",
    "#         print('Test total acc:{0}'.format(test_acc))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
